aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinux Build Service Account <lnxbuild@localhost>2019-02-06 21:00:29 -0800
committerLinux Build Service Account <lnxbuild@localhost>2019-02-06 21:00:29 -0800
commitefdc25857558d8d6011701a3f0056c29685fb0ee (patch)
treef5872224d319bd8df48f530e99fdbf64fd4bf819
parent8c873f417a683c8f447df8bf0165f5b61a80795f (diff)
parent505b1cd24748a6e71cbc7eada57dce05322faebd (diff)
Merge 505b1cd24748a6e71cbc7eada57dce05322faebd on remote branchLA.UM.7.8.r3-01100-SDM710.0
Change-Id: I888f5dc1bc93aec76a82563190b9f67f4682678a
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/qcom-audio-dev.txt191
-rw-r--r--arch/arm/configs/msm8909-perf_defconfig1
-rw-r--r--arch/arm/configs/msm8909_defconfig1
-rw-r--r--arch/arm/configs/msm8909w-perf_defconfig13
-rw-r--r--arch/arm/configs/msm8909w_defconfig2
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile11
-rw-r--r--arch/arm64/boot/dts/qcom/apq8009w-memory.dtsi35
-rw-r--r--arch/arm64/boot/dts/qcom/msm8909.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/msm8909w-memory.dtsi35
-rw-r--r--arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/pm8916.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/pmi632.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi98
-rw-r--r--arch/arm64/boot/dts/qcom/qm215-qrd-smb1360-overlay.dts22
-rw-r--r--arch/arm64/boot/dts/qcom/qm215-qrd-smb1360.dts24
-rw-r--r--arch/arm64/boot/dts/qcom/qm215-qrd-smb1360.dtsi13
-rw-r--r--arch/arm64/boot/dts/qcom/qm215.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sdw2500-apq8009w-wtp.dts369
-rw-r--r--arch/arm64/boot/dts/qcom/sdw2500-msm8909w-wtp.dts388
-rw-r--r--arch/arm64/boot/dts/qcom/spi-panel-st7789v2-qvga-cmd.dtsi67
-rw-r--r--drivers/base/regmap/Kconfig8
-rw-r--r--drivers/char/diag/diag_dci.c4
-rw-r--r--drivers/char/diag/diag_masks.c1
-rw-r--r--drivers/char/diag/diagchar_core.c19
-rw-r--r--drivers/hid/uhid.c15
-rw-r--r--drivers/hwmon/qpnp-adc-voltage.c96
-rw-r--r--drivers/iommu/iommu-debug.c24
-rw-r--r--drivers/media/platform/msm/Kconfig15
-rw-r--r--drivers/media/platform/msm/Makefile4
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.c4
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.h7
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c40
-rw-r--r--drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c42
-rw-r--r--drivers/media/platform/msm/camera_v3/Makefile13
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/Makefile9
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm.h261
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_core_common.c582
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_core_common.h51
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_hw_core.c1146
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_intf.c582
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_intf_api.h209
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_soc.c208
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_soc.h28
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_util.c716
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_util.h168
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_virtual.h25
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_virtual_core.c383
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cdm/cam_hw_cdm170_reg.h142
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/Makefile6
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_context.c591
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_context.h467
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_context_utils.c1013
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_context_utils.h40
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_core_defs.h44
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_hw.h53
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_hw_intf.h83
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_hw_mgr_intf.h334
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_node.c790
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_node.h111
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_core/cam_subdev.c161
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/Makefile10
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw.c1710
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw.h208
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw_intf.h135
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_intf.c662
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_soc.c291
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_soc.h80
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/camss_top/Makefile6
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/camss_top/cam_camsstop_hw.c89
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/Makefile6
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cam_cpastop_hw.c672
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cam_cpastop_hw.h273
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop100.h538
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v170_110.h555
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v175_100.h565
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v175_101.h565
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_cpas/include/cam_cpas_api.h527
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/Makefile14
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_context.c256
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_context.h37
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_dev.c214
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/Makefile14
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c1943
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h186
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h25
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/Makefile13
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c1168
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h244
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c237
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h289
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c290
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h53
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h70
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h70
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/Makefile14
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.c279
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.h49
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_subdev.c282
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_intf.h164
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_reg.h340
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_session_defs.h571
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_sys_defs.h535
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/hfi.c897
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/Makefile9
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/Makefile11
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_core.c483
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_core.h89
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_dev.c235
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_soc.c120
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_soc.h32
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/Makefile11
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_core.c405
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_core.h47
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_dev.c214
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_soc.c170
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_soc.h33
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/Makefile13
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c5003
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h369
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h81
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h44
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h36
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h44
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h47
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/Makefile11
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_core.c397
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_core.h47
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_dev.c206
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_soc.c173
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_soc.h33
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/Makefile10
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_context.c3360
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_context.h223
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_dev.c205
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_dev.h38
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_log.h26
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/Makefile14
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c5066
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h237
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c35
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h74
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/Makefile12
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c787
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c331
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h162
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h123
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/Makefile3
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c737
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h279
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h233
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/Makefile2
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile12
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c61
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h310
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c61
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c3432
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h487
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c147
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h23
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c59
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h319
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c57
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.h320
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c258
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h126
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h184
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h227
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h302
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/Makefile15
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c829
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h99
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c204
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h42
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c293
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h116
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/Makefile14
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c52
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h842
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe_lite170.c52
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe_lite170.h336
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile14
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h850
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c57
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h336
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile14
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c55
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h120
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c3622
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h234
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h85
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile13
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c361
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h60
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c658
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h96
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c301
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h50
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c53
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c842
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h68
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h43
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/Makefile11
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_context.c210
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_context.h74
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_dev.c209
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_dev.h39
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/Makefile13
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c1614
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h162
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h44
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h23
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile10
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c195
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h52
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c240
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c63
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h25
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile10
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h79
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c426
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h87
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c238
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c63
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h25
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/Makefile14
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_context.c258
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_context.h41
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_dev.c242
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/Makefile14
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c1150
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h133
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h25
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile13
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c1273
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h458
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c320
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h202
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h193
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c158
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h28
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/Makefile11
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr.c1267
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr.h131
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr_api.h125
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core.c2895
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core.h470
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core_defs.h24
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_debug.c139
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_debug.h21
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_dev.c759
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_dev.h50
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_interface.h339
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_timer.c99
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_timer.h71
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util.c340
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util.h172
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util_priv.h49
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_workq.c277
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_workq.h150
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_subdev.h115
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/Makefile10
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/Makefile11
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_core.c958
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_core.h73
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_dev.c448
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_dev.h132
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_soc.c86
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_soc.h26
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/Makefile8
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.c1646
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.h46
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.c505
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.h314
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_hwreg.h80
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.c408
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.h52
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/Makefile9
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.c876
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.h59
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c253
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h263
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c284
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h81
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h355
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h506
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h399
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_0_hwreg.h296
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/Makefile8
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_core.c946
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_core.h28
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c582
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h190
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c385
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h23
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/Makefile11
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_core.c1587
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_core.h27
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_dev.c638
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_dev.h226
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_soc.c224
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_soc.h21
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/Makefile10
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_core.c820
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_core.h40
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_dev.c423
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_dev.h133
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_soc.c129
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_soc.h19
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/Makefile9
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr.c738
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h148
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h117
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/Makefile10
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_core.c1254
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_core.h93
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_dev.c392
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_dev.h127
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_soc.c293
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_soc.h25
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/Makefile9
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c239
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h182
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_io.c191
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_io.h121
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c537
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c617
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h111
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/Makefile9
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h390
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c1944
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h67
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_smmu/Makefile4
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_smmu/cam_smmu_api.c3523
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_smmu/cam_smmu_api.h392
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sync/Makefile3
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sync/cam_sync.c1093
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_api.h151
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_private.h203
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_util.c457
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_util.h152
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/Makefile5
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_common_util.c57
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_common_util.h55
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_debug_util.c124
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_debug_util.h141
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_io_util.c287
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_io_util.h239
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_packet_util.c264
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_packet_util.h130
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_soc_util.c1700
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_soc_util.h653
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_trace.c16
-rw-r--r--drivers/media/platform/msm/camera_v3/cam_utils/cam_trace.h309
-rw-r--r--drivers/media/platform/msm/vidc/venus_hfi.c68
-rw-r--r--drivers/media/platform/msm/vidc_3x/venus_hfi.c67
-rw-r--r--drivers/misc/qseecom.c321
-rw-r--r--drivers/mmc/card/block.c17
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h4
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_nat.c27
-rw-r--r--drivers/power/supply/power_supply_sysfs.c1
-rw-r--r--drivers/power/supply/qcom/fg-alg.c21
-rw-r--r--drivers/power/supply/qcom/qg-core.h2
-rw-r--r--drivers/power/supply/qcom/qg-reg.h1
-rw-r--r--drivers/power/supply/qcom/qg-util.c5
-rw-r--r--drivers/power/supply/qcom/qpnp-qg.c95
-rw-r--r--drivers/power/supply/qcom/smb5-lib.c16
-rw-r--r--drivers/scsi/ufs/ufshcd.c4
-rw-r--r--drivers/soc/qcom/glink_smem_native_xprt.c6
-rw-r--r--drivers/usb/pd/policy_engine.c2
-rw-r--r--drivers/video/fbdev/msm/mdp3.c9
-rw-r--r--drivers/video/fbdev/msm/mdp3_ctrl.c18
-rw-r--r--drivers/video/fbdev/msm/mdp3_ctrl.h4
-rw-r--r--drivers/video/fbdev/msm/mdp3_dma.c21
-rw-r--r--drivers/video/fbdev/msm/mdp3_ppp.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.c31
-rw-r--r--drivers/video/fbdev/msm/mdss_smmu.c3
-rw-r--r--include/linux/power_supply.h1
-rw-r--r--include/linux/sched/sysctl.h7
-rw-r--r--include/trace/events/sched.h22
-rw-r--r--include/uapi/linux/qg.h3
-rw-r--r--include/uapi/media/cam_defs.h151
-rw-r--r--include/uapi/media/cam_icp.h4
-rw-r--r--include/uapi/media/cam_isp.h51
-rw-r--r--include/uapi/media/cam_isp_ife.h9
-rw-r--r--include/uapi/media/cam_req_mgr.h1
-rw-r--r--include/uapi/media/msmb_isp.h7
-rw-r--r--kernel/sched/energy.c2
-rw-r--r--kernel/sched/fair.c121
-rw-r--r--kernel/sched/sched.h33
-rw-r--r--kernel/sched/walt.c83
-rw-r--r--kernel/sysctl.c23
-rw-r--r--kernel/time/timer.c29
-rw-r--r--mm/memory.c10
396 files changed, 111560 insertions, 408 deletions
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
index 1e49e9642443..d8e459bc3769 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
@@ -308,6 +308,11 @@ First Level Node - QGAUGE device
Definition: Boolean property to support external-rsense based
configuration.
+- qcom,qg-vbms-mode
+	Usage:      optional
+	Value type: <bool>
+	Definition: Boolean property to run QG in Voltage-only (VBMS) mode.
+
- qcom,shutdown-temp-diff
Usage: optional
Value type: <u32>
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 6ad299d310c6..732130468121 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -1408,6 +1408,197 @@ Example:
"SpkrRight", "SpkrLeft";
};
+* MSM8909 ASoC Machine driver
+
+Required properties:
+- compatible : "qcom,msm8909-audio-codec"
+- qcom,model : The user-visible name of this sound card.
+- reg : Offset and length of the register region(s) for MI2S/PCM MUX
+- reg-names : Register region name(s) referenced in reg above
+ Required register resource entries are:
+		"csr_gp_io_mux_mic_ctl": Physical address of MUX that
+		controls LPA IF tertiary, quad, PCM0, Digital Codec
+		and Secondary TLMM mux setting for mic path operation.
+		"csr_gp_io_mux_spkr_ctl": Physical address of MUX that controls
+		LPA IF primary, secondary, Digital Codec and Primary TLMM
+		setting for speaker path operation.
+ "csr_gp_io_lpaif_pri_pcm_pri_mode_muxsel": Physical address of MUX
+ that controls the mux between LPA IF Quad and PCM0
+ path to secondary TLMM
+- qcom,msm-hs-micbias-type : This property is used to recognize the headset
+ micbias type, internal or external.
+- qcom,msm-ext-pa : This property is used to inform machine driver about
+ the connection of external PA over available MI2S interfaces,
+ following values can be given to this property.
+ primary -> Primary MI2S interface
+ secondary -> Secondary MI2S interface
+ tertiary -> Tertiary MI2S interface
+ quaternary -> Quaternary MI2S interface
+- qcom,msm-mbhc-hphl-swh: This property is used to distinguish headset HPHL
+switch type on target; typically the switch type will be normally open or
+normally closed. Value for this property: 0 for normally closed and 1 for
+normally open.
+- qcom,msm-mbhc-gnd-swh: This property is used to distinguish headset GND
+switch type on target; typically the switch type will be normally open or
+normally closed. Value for this property: 0 for normally closed and 1 for
+normally open.
+- qcom,audio-routing : A list of the connections between audio components.
+- qcom,msm-gpios : Lists down all the gpio sets that are supported.
+- qcom,pinctrl-names : Lists all the possible combinations of the gpio sets
+mentioned in qcom,msm-gpios.
+- pinctrl-names : The combinations of gpio sets from above that are supported in
+the flavor.
+- pinctrl-# : Pinctrl states as mentioned in pinctrl-names.
+
+Optional properties:
+- qcom,msm-afe-clk-ver: Provides detail of AFE clock API version.
+- qcom,hdmi-dba-codec-rx: Boolean. specifies if HDMI DBA audio support is enabled or not.
+- qcom,split-a2dp: Boolean. specifies if split a2dp audio support is enabled or not.
+- qcom,prim-auxpcm-gpio-clk : GPIO on which Primary AUXPCM clk signal is coming.
+- qcom,prim-auxpcm-gpio-sync : GPIO on which Primary AUXPCM SYNC signal is coming.
+- qcom,prim-auxpcm-gpio-din : GPIO on which Primary AUXPCM DIN signal is coming.
+- qcom,prim-auxpcm-gpio-dout : GPIO on which Primary AUXPCM DOUT signal is coming.
+- qcom,prim-auxpcm-gpio-set : set of GPIO lines used for Primary AUXPCM port
+- qcom,cdc-us-euro-gpios : GPIO on which gnd/mic swap signal is coming.
+- qcom,msm-micbias1-ext-cap : Boolean. Enable micbias1 external
+capacitor mode.
+- qcom,msm-micbias2-ext-cap : Boolean. Enable micbias2 external
+capacitor mode.
+- qcom,msm-spk-ext-pa : GPIO which enables external speaker pa.
+- qcom,subsys-name: This value provides the subsystem name where the codec
+		is present. This property enables the codec driver to
+		register for and receive subsystem restart notification from
+		the subsystem and follow appropriate steps to ensure the codec
+		is in a proper state after subsystem restart. By default the
+		codec driver registers with the ADSP subsystem.
+
+To Configure External Audio Switch
+- qcom,msm-ext-audio-switch : GPIO which controls external switch that switches
+ audio path between headset and speakers.
+- ext-switch-vdd-supply : Power supply that controls the external audio switch
+- qcom,ext-switch-vdd-voltage : Minimum and maximum voltage in uV to set for
+ power supply.
+- qcom,ext-switch-vdd-op-mode : Maximum # of uA current the switch will draw
+				from the power supply.
+Example:
+ qcom,msm-ext-audio-switch = <&msm_gpio 2 0>; - gpio # and active_state
+ ext-switch-vdd-supply = <&pm8950_l13>; - Power Rail
+ qcom,ext-switch-vdd-voltage = <3075000 3075000>; - Min, Max uV voltage
+ qcom,ext-switch-vdd-op-mode = <5000>; - Operational current uA
+	In addition, the following two qcom,audio-routing entries need to be added:
+ "HEADPHONE", "VDD_EXT_AUDIO_SWITCH"
+ "SPK_OUT", "VDD_EXT_AUDIO_SWITCH"
+
+- qcom,msm-mclk-freq : This property is used to inform machine driver about
+mclk frequency needs to be configured for internal and external PA.
+- asoc-platform: This is phandle list containing the references to platform device
+ nodes that are used as part of the sound card dai-links.
+- asoc-platform-names: This property contains list of platform names. The order of
+ the platform names should match to that of the phandle order
+ given in "asoc-platform".
+- asoc-cpu: This is phandle list containing the references to cpu dai device nodes
+ that are used as part of the sound card dai-links.
+- asoc-cpu-names: This property contains list of cpu dai names. The order of the
+ cpu dai names should match to that of the phandle order given.
+- asoc-codec: This is phandle list containing the references to codec dai device
+ nodes that are used as part of the sound card dai-links.
+- asoc-codec-names: This property contains list of codec dai names. The order of the
+ codec dai names should match to that of the phandle order given
+ in "asoc-codec".
+- asoc-wsa-codec-names: This property contains list of wsa codec names. The names
+ should comply with the wsa nodes configurations.
+- asoc-wsa-codec-prefixes: This property contains list of wsa codec prefixes.
+- msm-vdd-wsa-switch-supply: WSA codec supply's regulator device tree node.
+- qcom,msm-vdd-wsa-switch-voltage: WSA codec supply's voltage level in mV.
+- qcom,msm-vdd-wsa-switch-current: WSA codec max current level in mA.
+
+Example:
+ sound {
+ compatible = "qcom,msm8909-audio-codec";
+ qcom,model = "msm8909-snd-card";
+ reg = <0xc051000 0x4>,
+ <0xc051004 0x4>,
+ <0xc055000 0x4>;
+ reg-names = "csr_gp_io_mux_mic_ctl",
+ "csr_gp_io_mux_spkr_ctl",
+ "csr_gp_io_lpaif_pri_pcm_pri_mode_muxsel";
+ qcom,msm-afe-clk-ver = <1>;
+ qcom,msm-ext-pa = "primary";
+ qcom,hdmi-dba-codec-rx;
+ qcom,split-a2dp;
+ qcom,msm-mclk-freq = <9600000>;
+ qcom,msm-mbhc-hphl-swh = <0>;
+ qcom,msm-mbhc-gnd-swh = <0>;
+ qcom,msm-hs-micbias-type = "internal";
+ qcom,msm-micbias1-ext-cap;
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK",
+ "SPK_RX_BIAS", "MCLK",
+ "INT_LDO_H", "MCLK",
+ "MIC BIAS External", "Handset Mic",
+ "MIC BIAS Internal2", "Headset Mic",
+ "MIC BIAS External", "Secondary Mic",
+ "AMIC1", "MIC BIAS External",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External";
+ qcom,msm-gpios =
+ "pri_i2s",
+ "us_eu_gpio";
+ qcom,pinctrl-names =
+ "all_off",
+ "pri_i2s_act",
+ "us_eu_gpio_act",
+ "pri_i2s_us_eu_gpio_act";
+ pinctrl-names =
+ "all_off",
+ "pri_i2s_act",
+ "us_eu_gpio_act",
+ "pri_i2s_us_eu_gpio_act";
+ pinctrl-0 = <&cdc_pdm_lines_sus &cdc_pdm_lines_2_sus &cross_conn_det_sus>;
+ pinctrl-1 = <&cdc_pdm_lines_act &cdc_pdm_lines_2_act &cross_conn_det_sus>;
+ pinctrl-2 = <&cdc_pdm_lines_sus &cdc_pdm_lines_2_sus &cross_conn_det_act>;
+ pinctrl-3 = <&cdc_pdm_lines_act &cdc_pdm_lines_2_act &cross_conn_det_act>;
+ qcom,cdc-us-euro-gpios = <&msm_gpio 63 0>;
+ qcom,prim-auxpcm-gpio-clk = <&msm_gpio 63 0>;
+ qcom,prim-auxpcm-gpio-sync = <&msm_gpio 64 0>;
+ qcom,prim-auxpcm-gpio-din = <&msm_gpio 65 0>;
+ qcom,prim-auxpcm-gpio-dout = <&msm_gpio 66 0>;
+ qcom,prim-auxpcm-gpio-set = "prim-gpio-prim";
+ qcom,tapan-codec-9302;
+ asoc-platform = <&pcm0>, <&pcm1>, <&voip>, <&voice>,
+ <&loopback>, <&compress>, <&hostless>,
+ <&afe>, <&lsm>, <&routing>, <&lpa>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-voip-dsp", "msm-pcm-voice", "msm-pcm-loopback",
+ "msm-compress-dsp", "msm-pcm-hostless", "msm-pcm-afe",
+ "msm-lsm-client", "msm-pcm-routing", "msm-pcm-lpa";
+ asoc-cpu = <&dai_pri_auxpcm>, <&dai_hdmi>,
+ <&dai_mi2s0>, <&dai_mi2s1>, <&dai_mi2s2>, <&dai_mi2s3>,
+ <&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+ <&sb_3_rx>, <&sb_3_tx>, <&sb_4_rx>, <&sb_4_tx>,
+ <&bt_sco_rx>, <&bt_sco_tx>, <&int_fm_rx>, <&int_fm_tx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>, <&afe_proxy_tx>,
+ <&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>,
+ <&incall_music_2_rx>;
+ asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-hdmi.8",
+ "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+ "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+ "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+ "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+ "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+ "msm-dai-q6-dev.12288", "msm-dai-q6-dev.12289",
+ "msm-dai-q6-dev.12292", "msm-dai-q6-dev.12293",
+ "msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+ "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+ "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770";
+ asoc-codec = <&stub>, <&pm8916_tombak_dig>;
+ asoc-codec-names = "msm-stub-codec.1", "tombak_codec";
+ asoc-wsa-codec-names = "wsa881x-i2c-codec.8-000f";
+ asoc-wsa-codec-prefixes = "SpkrMono";
+ };
+
* MSM8952 ASoC Machine driver
Required properties:
diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig
index c9588bc93152..05e1e18db804 100644
--- a/arch/arm/configs/msm8909-perf_defconfig
+++ b/arch/arm/configs/msm8909-perf_defconfig
@@ -430,6 +430,7 @@ CONFIG_IIO=y
CONFIG_INV_ICM20602_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QTI_MPM=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig
index 428cd92dba78..e376082a9e8a 100644
--- a/arch/arm/configs/msm8909_defconfig
+++ b/arch/arm/configs/msm8909_defconfig
@@ -478,6 +478,7 @@ CONFIG_IIO=y
CONFIG_INV_ICM20602_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QTI_MPM=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8909w-perf_defconfig b/arch/arm/configs/msm8909w-perf_defconfig
index e32da5f73d90..084e48707f58 100644
--- a/arch/arm/configs/msm8909w-perf_defconfig
+++ b/arch/arm/configs/msm8909w-perf_defconfig
@@ -15,7 +15,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_FREEZER=y
-CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
@@ -217,24 +216,15 @@ CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_SND_DIGCDC=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_HDCP_QSEECOM=y
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
CONFIG_QPNP_MISC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_UFSHCD=y
-CONFIG_SCSI_UFSHCD_PLATFORM=y
-CONFIG_SCSI_UFS_QCOM=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@@ -453,6 +443,7 @@ CONFIG_QCOM_SPMI_IADC=y
CONFIG_QCOM_SPMI_VADC=y
CONFIG_QCOM_RRADC=y
CONFIG_PWM=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QTI_MPM=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8909w_defconfig b/arch/arm/configs/msm8909w_defconfig
index b54a44c5b255..cfe45d4b1bfa 100644
--- a/arch/arm/configs/msm8909w_defconfig
+++ b/arch/arm/configs/msm8909w_defconfig
@@ -215,6 +215,7 @@ CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_SND_DIGCDC=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -449,6 +450,7 @@ CONFIG_QCOM_SPMI_IADC=y
CONFIG_QCOM_SPMI_VADC=y
CONFIG_QCOM_RRADC=y
CONFIG_PWM=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QTI_MPM=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 3081c77e350c..9b305fd30300 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -349,6 +349,7 @@ dtbo-$(CONFIG_ARCH_MSM8917) += msm8917-mtp-overlay.dtbo \
apq8017-cdp-wcd-rome-overlay.dtbo
dtbo-$(CONFIG_ARCH_QM215) +=qm215-qrd-overlay.dtbo
+dtbo-$(CONFIG_ARCH_QM215) +=qm215-qrd-smb1360-overlay.dtbo
dtbo-$(CONFIG_ARCH_MSM8953) += msm8953-mtp-overlay.dtbo \
msm8953-cdp-overlay.dtbo \
@@ -425,6 +426,7 @@ apq8017-cdp-wcd-rome-overlay.dtbo-base := apq8017-pmi8950.dtb \
apq8017-pmi8937.dtb
qm215-qrd-overlay.dtbo-base := qm215.dtb
+qm215-qrd-smb1360-overlay.dtbo-base := qm215.dtb
msm8953-mtp-overlay.dtbo-base := sdm450.dtb \
msm8953.dtb \
@@ -563,6 +565,7 @@ dtb-$(CONFIG_ARCH_MSM8917) += msm8917-pmi8950-mtp.dtb \
msm8917-pmi8940-rcm.dtb
dtb-$(CONFIG_ARCH_QM215) += qm215-qrd.dtb
+dtb-$(CONFIG_ARCH_QM215) += qm215-qrd-smb1360.dtb
dtb-$(CONFIG_ARCH_MSM8909) += sdw3100-msm8909w-wtp.dtb \
sdw3100-apq8009w-wtp.dtb \
@@ -573,7 +576,10 @@ dtb-$(CONFIG_ARCH_MSM8909) += sdw3100-msm8909w-wtp.dtb \
apq8009-robot-pronto-refboard.dtb \
apq8009-dragon.dtb \
apq8009-lat-v1.0.dtb \
- sdw3100-msm8909w-1gb-wtp.dtb
+ sdw3100-msm8909w-1gb-wtp.dtb \
+ sdw2500-apq8009w-wtp.dtb \
+ sdw2500-msm8909w-wtp.dtb
+
dtb-$(CONFIG_ARCH_SDM450) += sdm450-rcm.dtb \
sdm450-cdp.dtb \
@@ -613,7 +619,8 @@ dtb-$(CONFIG_ARCH_SDM439) += sdm439-mtp.dtb \
sda439-cdp.dtb \
sdm439-external-codec-mtp.dtb \
sdm439-rcm.dtb \
- qm215-qrd.dtb
+ qm215-qrd.dtb \
+ qm215-qrd-smb1360.dtb
dtb-$(CONFIG_ARCH_SDM429) += sdm429-mtp.dtb \
sdm429-cdp.dtb \
diff --git a/arch/arm64/boot/dts/qcom/apq8009w-memory.dtsi b/arch/arm64/boot/dts/qcom/apq8009w-memory.dtsi
new file mode 100644
index 000000000000..fbac2383020d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8009w-memory.dtsi
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&external_image_mem {
+ reg = <0x0 0x87b00000 0x0 0x0500000>;
+};
+
+&modem_adsp_mem {
+ reg = <0x0 0x88000000 0x0 0x02100000>;
+};
+
+&peripheral_mem {
+ reg = <0x0 0x8a100000 0x0 0x0600000>;
+};
+
+&reserved_mem {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xa0000000>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x1000000>;
+ linux,cma-default;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909.dtsi b/arch/arm64/boot/dts/qcom/msm8909.dtsi
index 5b65d328988c..c29809481a76 100644
--- a/arch/arm64/boot/dts/qcom/msm8909.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -465,6 +465,16 @@
clock-names = "core", "iface";
};
+ blsp1_uart2: serial@78b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uart";
+ reg = <0x78b0000 0x200>;
+ interrupts = <0 108 0>;
+ status = "disabled";
+ clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
+ <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+ clock-names = "core", "iface";
+ };
+
blsp1_uart2_hs: uart@78b0000 { /*BLSP1 UART2*/
compatible = "qcom,msm-hsuart-v14";
reg = <0x78b0000 0x200>,
diff --git a/arch/arm64/boot/dts/qcom/msm8909w-memory.dtsi b/arch/arm64/boot/dts/qcom/msm8909w-memory.dtsi
new file mode 100644
index 000000000000..fd5df153039b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909w-memory.dtsi
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&external_image_mem {
+ reg = <0x0 0x87b00000 0x0 0x0500000>;
+};
+
+&modem_adsp_mem {
+ reg = <0x0 0x88000000 0x0 0x05000000>;
+};
+
+&peripheral_mem {
+ reg = <0x0 0x8d000000 0x0 0x0600000>;
+};
+
+&reserved_mem {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xa0000000>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x1000000>;
+ linux,cma-default;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi b/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
index 7eb4d38a2a90..f86bfebee046 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
@@ -135,8 +135,11 @@
CPU_COST_0: core-cost0 {
busy-cost-data = <
960000 159
- 1094000 207
+ 998400 172
+ 1094400 207
+ 1209600 244
1248000 256
+ 1305600 283
1401000 327
1497600 343
>;
@@ -147,8 +150,11 @@
CLUSTER_COST_0: cluster-cost0 {
busy-cost-data = <
960000 53
- 1094000 61
+ 998400 55
+ 1094400 61
+ 1209600 68
1248000 71
+ 1305600 77
1401000 85
1497600 88
>;
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index 45516be974c3..7c925d0d4464 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, 2016-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -180,6 +180,7 @@
compatible = "qcom,qpnp-linear-charger";
#address-cells = <1>;
#size-cells = <1>;
+ #cooling-cells = <2>;
qcom,vddmax-mv = <4200>;
qcom,vddsafe-mv = <4200>;
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index a6f29bf3e3bc..da15b33355ec 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -44,6 +44,7 @@
interrupt-names = "eoc-int-en-set";
qcom,adc-vdd-reference = <1875>;
qcom,adc-full-scale-code = <0x70e4>;
+ qcom,pmic-revid = <&pmi632_revid>;
chan@0 {
label = "ref_gnd";
diff --git a/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi b/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
index 6b08005014ee..7cce2d7b3d2b 100644
--- a/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -476,6 +476,102 @@
};
};
};
+
+ xo-therm-step {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&pm8916_vadc 0x32>;
+ thermal-governor = "step_wise";
+
+ trips {
+ qm215_batt_trip0: qm215-batt-trip0 {
+ temperature = <41000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ qm215_modem_trip0: qm215-modem-trip0 {
+ temperature = <44000>;
+ hysteresis = <4000>;
+ type = "passive";
+ };
+ qm215_batt_trip1: qm215-batt-trip1 {
+ temperature = <45000>;
+ hysteresis = <4000>;
+ type = "passive";
+ };
+ qm215_modem_trip1: qm215-modem-trip1 {
+ temperature = <46000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ qm215_cpu_trip: qm215-cpu-trip {
+ temperature = <48000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ qm215_gpu_trip: qm215-gpu-trip {
+ temperature = <50000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ qm215_modem_trip2: qm215-modem-trip2 {
+ temperature = <60000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+
+ cooling-maps {
+ skin_cpu0 {
+ trip = <&qm215_cpu_trip>;
+ /* throttle from fmax to 1094400KHz */
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT
+ (THERMAL_MAX_LIMIT-2)>;
+ };
+ skin_cpu1 {
+ trip = <&qm215_cpu_trip>;
+ cooling-device = <&CPU1 THERMAL_NO_LIMIT
+ (THERMAL_MAX_LIMIT-2)>;
+ };
+ skin_cpu2 {
+ trip = <&qm215_cpu_trip>;
+ cooling-device = <&CPU2 THERMAL_NO_LIMIT
+ (THERMAL_MAX_LIMIT-2)>;
+ };
+ skin_cpu3 {
+ trip = <&qm215_cpu_trip>;
+ cooling-device = <&CPU3 THERMAL_NO_LIMIT
+ (THERMAL_MAX_LIMIT-2)>;
+ };
+ skin_gpu {
+ trip = <&qm215_gpu_trip>;
+ /* throttle from fmax to 400000000Hz */
+ cooling-device = <&msm_gpu
+ THERMAL_NO_LIMIT
+ (THERMAL_MAX_LIMIT-1)>;
+ };
+ modem_lvl1 {
+ trip = <&qm215_modem_trip0>;
+ cooling-device = <&modem_pa 1 1>;
+ };
+ modem_lvl2 {
+ trip = <&qm215_modem_trip1>;
+ cooling-device = <&modem_pa 2 2>;
+ };
+ modem_lvl3 {
+ trip = <&qm215_modem_trip2>;
+ cooling-device = <&modem_pa 3 3>;
+ };
+ battery_lvl1 {
+ trip = <&qm215_batt_trip0>;
+ cooling-device = <&pm8916_chg 1 1>;
+ };
+ battery_lvl2 {
+ trip = <&qm215_batt_trip1>;
+ cooling-device = <&pm8916_chg 2 2>;
+ };
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/qm215-qrd-smb1360-overlay.dts b/arch/arm64/boot/dts/qcom/qm215-qrd-smb1360-overlay.dts
new file mode 100644
index 000000000000..2ada905efcb6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qm215-qrd-smb1360-overlay.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "qm215-qrd-smb1360.dtsi"
+
+/ {
+ model = "QRD + SMB1360";
+ qcom,board-id = <0x0b 5>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qm215-qrd-smb1360.dts b/arch/arm64/boot/dts/qcom/qm215-qrd-smb1360.dts
new file mode 100644
index 000000000000..c64753db2317
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qm215-qrd-smb1360.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "qm215.dtsi"
+#include "qm215-pm8916.dtsi"
+#include "qm215-qrd-smb1360.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. qm215 SMB1360 QRD";
+ compatible = "qcom,qm215-qrd", "qcom,qm215", "qcom,qrd";
+ qcom,board-id = <0x0b 5>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qm215-qrd-smb1360.dtsi b/arch/arm64/boot/dts/qcom/qm215-qrd-smb1360.dtsi
new file mode 100644
index 000000000000..0885da1cb2e9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qm215-qrd-smb1360.dtsi
@@ -0,0 +1,13 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "qm215-qrd.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/qm215.dtsi b/arch/arm64/boot/dts/qcom/qm215.dtsi
index e805d93080c2..597ce27b1a2f 100644
--- a/arch/arm64/boot/dts/qcom/qm215.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -264,3 +264,13 @@
};
};
};
+
+&thermal_zones {
+ gpu0-step {
+ trips {
+ gpu-step-trip {
+ temperature = <85000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index a7c3a927b2ee..bf50ce61cec7 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -376,6 +376,7 @@
2054400 876
2169600 900
2208000 924
+ 2304000 940
2361600 948
2400000 1170
2457600 1200
@@ -418,6 +419,7 @@
2054400 87
2169600 90
2208000 92
+ 2304000 93
2361600 94
2400000 117
2457600 120
diff --git a/arch/arm64/boot/dts/qcom/sdw2500-apq8009w-wtp.dts b/arch/arm64/boot/dts/qcom/sdw2500-apq8009w-wtp.dts
new file mode 100644
index 000000000000..c5b8c7544c7e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdw2500-apq8009w-wtp.dts
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8909-mtp.dtsi"
+#include "msm8909w-gpu.dtsi"
+#include "msm8909w.dtsi"
+#include "apq8009w-memory.dtsi"
+#include "8909w-pm660.dtsi"
+#include "spi-panel-st7789v2-qvga-cmd.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ8009W-PM660 WTP SDW2500";
+ compatible = "qcom,apq8009-mtp", "qcom,apq8009", "qcom,mtp";
+ qcom,msm-id = <265 0>,
+ <301 0>;
+ qcom,board-id = <8 0x112>;
+ qcom,pmic-id = <0x0001001b 0x0 0x0 0x0>,
+ <0x0001011b 0x0 0x0 0x0>;
+};
+
+&soc {
+ /delete-node/ qcom,msm-cpufreq;
+ qcom,msm-cpufreq {
+ reg = <0 4>;
+ compatible = "qcom,msm-cpufreq";
+ clocks = <&clock_cpu clk_a7ssmux>,
+ <&clock_cpu clk_a7ssmux>,
+ <&clock_cpu clk_a7ssmux>,
+ <&clock_cpu clk_a7ssmux>;
+ clock-names = "cpu0_clk", "cpu1_clk",
+ "cpu2_clk", "cpu3_clk";
+ qcom,cpufreq-table =
+ < 400000 >,
+ < 800000 >,
+ < 1094400 >,
+ < 1267200 >;
+ };
+
+ i2c@78b7000 { /* BLSP1 QUP3 */
+ synaptics@20 {
+ compatible = "synaptics,dsx-i2c";
+ reg = <0x20>;
+ interrupt-parent = <&msm_gpio>;
+ interrupts = <98 0x2008>;
+ vdd_ana-supply = <&pm660_l18>;
+ vcc_i2c-supply = <&pm660_l13>;
+ synaptics,pwr-reg-name = "vdd_ana";
+ synaptics,bus-reg-name = "vcc_i2c";
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend",
+ "pmx_ts_release";
+ pinctrl-0 = <&ts_int_active &ts_reset_active>;
+ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+ pinctrl-2 = <&ts_release>;
+ synaptics,irq-gpio = <&msm_gpio 98 0x2008>;
+ synaptics,irq-on-state = <0>;
+ synaptics,irq-flags = <0x2008>;
+ synaptics,power-delay-ms = <200>;
+ synaptics,max-y-for-2d = <389>;
+ synaptics,wakeup-gestures-en = <1>;
+ synaptics,resume-in-workqueue;
+ synaptics,x-flip;
+ synaptics,y-flip;
+
+ synaptics,reset-gpio = <&msm_gpio 31 0x0>;
+ synaptics,reset-delay-ms = <200>;
+ synaptics,reset-on-state = <0>;
+ synaptics,reset-active-ms = <20>;
+ /delete-property/ synaptics,display-coords;
+ /delete-property/ synaptics,panel-coords;
+ /delete-property/ synaptics,power-down;
+ /delete-property/ synaptics,disable-gpios;
+ /delete-property/ synaptics,is_wake;
+ };
+
+ /delete-node/ it7260@46;
+ };
+
+ spi@78B8000 { /* BLSP1 QUP4 */
+ status = "ok";
+ qcom,mdss_spi_client {
+ reg = <0>;
+ compatible = "qcom,mdss-spi-client";
+ label = "MDSS SPI QUP4 CLIENT";
+ dc-gpio = <&msm_gpio 59 0>;
+ spi-max-frequency = <50000000>;
+ };
+ };
+
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
+
+ qcom,msm-thermal {
+ vdd-dig-supply = <&pm660_s2_floor_corner>;
+
+ msm_thermal_freq: qcom,vdd-apps-rstr {
+ qcom,vdd-rstr-reg = "vdd-apps";
+ qcom,levels = <1094400>;
+ qcom,freq-req;
+ };
+ };
+
+ qcom,bcl {
+ compatible = "qcom,bcl";
+ qcom,bcl-enable;
+ qcom,bcl-framework-interface;
+ qcom,bcl-freq-control-list = <&CPU0 &CPU1 &CPU2 &CPU3>;
+ qcom,bcl-hotplug-list = <&CPU2 &CPU3>;
+ qcom,bcl-soc-hotplug-list = <&CPU2 &CPU3>;
+ qcom,ibat-monitor {
+ qcom,low-threshold-uamp = <1000000>;
+ qcom,high-threshold-uamp = <2000000>;
+ qcom,mitigation-freq-khz = <1094400>;
+ qcom,vph-high-threshold-uv = <3500000>;
+ qcom,vph-low-threshold-uv = <3200000>;
+ qcom,soc-low-threshold = <10>;
+ qcom,thermal-handle = <&msm_thermal_freq>;
+ };
+ };
+
+ msm_digital_codec: msm-dig-codec@771c000 {
+ compatible = "qcom,msm-digital-codec";
+ reg = <0x0771c000 0x0>;
+
+ cdc-vdd-digital-supply = <&pm660_l11>;
+ qcom,cdc-vdd-digital-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-digital-current = <5000>;
+ qcom,cdc-on-demand-supplies = "cdc-vdd-digital";
+
+ qcom,subsys-name = "modem";
+ };
+
+ mdss_spi: qcom,mdss_spi {
+ compatible = "qcom,mdss-spi-display";
+ label = "mdss spi panel";
+
+ qcom,mdss-fb-map = <&mdss_fb0>;
+ qcom,mdss-mdp = <&mdss_mdp>;
+ vdd-supply = <&pm660_l18>;
+ vddio-supply = <&pm660_l11>;
+
+ qcom,panel-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,panel-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdd";
+ qcom,supply-min-voltage = <3000000>;
+ qcom,supply-max-voltage = <3000000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+ qcom,panel-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "vddio";
+ qcom,supply-min-voltage = <1800000>;
+ qcom,supply-max-voltage = <3000000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+ };
+ };
+};
+
+&qcom_seecom {
+ reg = <0x87a00000 0x200000>;
+ reg-names = "secapp-region";
+ status = "okay";
+};
+
+&external_image_mem {
+ reg = <0x0 0x87a00000 0x0 0x0600000>;
+};
+
+&modem_adsp_mem {
+ reg = <0x0 0x88000000 0x0 0x05600000>;
+};
+
+&peripheral_mem {
+ reg = <0x0 0x8d600000 0x0 0x0600000>;
+};
+
+&i2c_1 {
+ status = "disabled";
+};
+
+&spi_0 {
+ status = "disabled";
+};
+
+&i2c_4 {
+ status = "disabled";
+};
+
+&i2c_2 {
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&msm_gpio 50 0x00>;
+ qcom,nq-ven = <&msm_gpio 52 0x00>;
+ qcom,nq-firm = <&msm_gpio 38 0x00>;
+ qcom,nq-esepwr = <&msm_gpio 49 0x00>;
+ qcom,nq-clkreq = <&pm660_gpios 4 0x00>;
+ qcom,clk-src = "BBCLK3";
+ interrupt-parent = <&msm_gpio>;
+ interrupts = <50 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active","nfc_suspend";
+ pinctrl-0 = <&nfcw_int_active &nfcv2k_disable_active>;
+ pinctrl-1 = <&nfcw_int_suspend &nfcv2k_disable_suspend>;
+ clocks = <&clock_rpm clk_bb_clk3_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
+&i2c_5 {
+ status = "disabled";
+};
+
+&sdhc_2 {
+ status = "disabled";
+};
+
+&blsp1_uart1 {
+ status = "disabled";
+};
+
+&blsp1_uart2 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console2_sleep>;
+};
+
+/* Pinctrl dt nodes for interrupt & reset gpio for Synaptics touch controller */
+&ts_int_active {
+ mux {
+ pins = "gpio98";
+ };
+
+ config {
+ pins = "gpio98";
+ };
+};
+
+&ts_int_suspend {
+ mux {
+ pins = "gpio98";
+ };
+
+ config {
+ pins = "gpio98";
+ };
+};
+
+&ts_reset_active {
+ mux {
+ pins = "gpio31";
+ };
+
+ config {
+ pins = "gpio31";
+ };
+};
+
+&ts_reset_suspend {
+ mux {
+ pins = "gpio31";
+ };
+
+ config {
+ pins = "gpio31";
+ };
+};
+
+&ts_release {
+ mux {
+ pins = "gpio98", "gpio31";
+ };
+
+ config {
+ pins = "gpio98", "gpio31";
+ };
+};
+
+&vendor_fstab {
+ fsmgr_flags = "wait";
+};
+
+&mdss_dsi0{
+ qcom,dsi-pref-prim-pan = <&dsi_auo_390p_cmd>;
+ qcom,platform-bklight-en-gpio = <&msm_gpio 37 0>;
+};
+
+&spi_st7789v2_qvga_cmd {
+ qcom,panel-supply-entries = <&dsi_pm660_panel_pwr_supply>;
+ qcom,mdss-spi-bl-pmic-pwm-frequency = <100>;
+ qcom,mdss-spi-bl-pmic-bank-select = <0>;
+ qcom,cont-splash-enabled;
+};
+
+&mdss_spi {
+ qcom,spi-pref-prim-pan = <&spi_st7789v2_qvga_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_te_active>;
+ pinctrl-1 = <&mdss_te_suspend>;
+
+ qcom,platform-te-gpio = <&msm_gpio 24 0>;
+ qcom,platform-reset-gpio = <&msm_gpio 25 0>;
+ qcom,platform-spi-dc-gpio = <&msm_gpio 59 0>;
+
+ vdd-supply = <&pm660_l18>;
+ vddio-supply = <&pm660_l11>;
+};
+
+&dai_mi2s3 {
+ qcom,msm-mi2s-rx-lines = <1>;
+ qcom,msm-mi2s-tx-lines = <2>;
+};
+
+&audio_codec_mtp {
+ compatible = "qcom,msm8909-audio-codec";
+ qcom,model = "msm8909w-wtp-snd-card";
+ qcom,msm-ext-pa = "quaternary";
+ qcom,split-a2dp;
+ qcom,audio-routing =
+ "CDC_CONN", "MCLK",
+ "QUAT_MI2S_RX", "DIGIT_REGULATOR",
+ "TX_I2S_CLK", "DIGIT_REGULATOR",
+ "DMIC1", "Digital Mic1",
+ "DMIC2", "Digital Mic2";
+ qcom,msm-gpios =
+ "quat_i2s",
+ "dmic";
+ qcom,pinctrl-names =
+ "all_off",
+ "quat_i2s_act",
+ "dmic_act",
+ "quat_i2s_dmic_act";
+ pinctrl-names =
+ "all_off",
+ "quat_i2s_act",
+ "dmic_act",
+ "quat_i2s_dmic_act";
+ pinctrl-0 = <&quat_mi2s_sleep &quat_mi2s_din_sleep
+ &cdc_dmic0_clk_sus &cdc_dmic0_data_sus>;
+ pinctrl-1 = <&quat_mi2s_active &quat_mi2s_din_active
+ &cdc_dmic0_clk_sus &cdc_dmic0_data_sus>;
+ pinctrl-2 = <&quat_mi2s_sleep &quat_mi2s_din_sleep
+ &cdc_dmic0_clk_act &cdc_dmic0_data_act>;
+ pinctrl-3 = <&quat_mi2s_active &quat_mi2s_din_active
+ &cdc_dmic0_clk_act &cdc_dmic0_data_act>;
+ /delete-property/qcom,cdc-us-euro-gpios;
+
+ asoc-codec = <&stub_codec>, <&msm_digital_codec>;
+ asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdw2500-msm8909w-wtp.dts b/arch/arm64/boot/dts/qcom/sdw2500-msm8909w-wtp.dts
new file mode 100644
index 000000000000..1ee14ac3ce51
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdw2500-msm8909w-wtp.dts
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8909-mtp.dtsi"
+#include "msm8909w-gpu.dtsi"
+#include "msm8909w.dtsi"
+#include "msm8909w-memory.dtsi"
+#include "8909w-pm660.dtsi"
+#include "spi-panel-st7789v2-qvga-cmd.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8909W-PM660 WTP SDW2500";
+ compatible = "qcom,msm8909-mtp", "qcom,msm8909", "qcom,mtp";
+ qcom,msm-id = <245 0>,
+ <258 0>,
+ <275 0>,
+ <300 0>;
+ qcom,board-id = <8 0x112>;
+ qcom,pmic-id = <0x0001001b 0x0 0x0 0x0>,
+ <0x0001011b 0x0 0x0 0x0>;
+};
+
+&soc {
+ /delete-node/ qcom,msm-cpufreq;
+ qcom,msm-cpufreq {
+ reg = <0 4>;
+ compatible = "qcom,msm-cpufreq";
+ clocks = <&clock_cpu clk_a7ssmux>,
+ <&clock_cpu clk_a7ssmux>,
+ <&clock_cpu clk_a7ssmux>,
+ <&clock_cpu clk_a7ssmux>;
+ clock-names = "cpu0_clk", "cpu1_clk",
+ "cpu2_clk", "cpu3_clk";
+ qcom,cpufreq-table =
+ < 400000 >,
+ < 800000 >,
+ < 1094400 >,
+ < 1267200 >;
+ };
+
+ i2c@78b7000 { /* BLSP1 QUP3 */
+ synaptics@20 {
+ compatible = "synaptics,dsx-i2c";
+ reg = <0x20>;
+ interrupt-parent = <&msm_gpio>;
+ interrupts = <98 0x2008>;
+ vdd_ana-supply = <&pm660_l18>;
+ vcc_i2c-supply = <&pm660_l13>;
+ synaptics,pwr-reg-name = "vdd_ana";
+ synaptics,bus-reg-name = "vcc_i2c";
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend",
+ "pmx_ts_release";
+ pinctrl-0 = <&ts_int_active &ts_reset_active>;
+ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+ pinctrl-2 = <&ts_release>;
+ synaptics,irq-gpio = <&msm_gpio 98 0x2008>;
+ synaptics,irq-on-state = <0>;
+ synaptics,irq-flags = <0x2008>;
+ synaptics,power-delay-ms = <200>;
+ synaptics,max-y-for-2d = <389>;
+ synaptics,wakeup-gestures-en = <1>;
+ synaptics,resume-in-workqueue;
+ synaptics,x-flip;
+ synaptics,y-flip;
+
+ synaptics,reset-gpio = <&msm_gpio 31 0x0>;
+ synaptics,reset-delay-ms = <200>;
+ synaptics,reset-on-state = <0>;
+ synaptics,reset-active-ms = <20>;
+ /delete-property/ synaptics,display-coords;
+ /delete-property/ synaptics,panel-coords;
+ /delete-property/ synaptics,power-down;
+ /delete-property/ synaptics,disable-gpios;
+ /delete-property/ synaptics,is_wake;
+ };
+
+ /delete-node/ it7260@46;
+ };
+
+ spi@78B8000 { /* BLSP1 QUP4 */
+ status = "ok";
+ qcom,mdss_spi_client {
+ reg = <0>;
+ compatible = "qcom,mdss-spi-client";
+ label = "MDSS SPI QUP4 CLIENT";
+ dc-gpio = <&msm_gpio 59 0>;
+ spi-max-frequency = <50000000>;
+ };
+ };
+
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
+
+ qcom,msm-thermal {
+ vdd-dig-supply = <&pm660_s2_floor_corner>;
+
+ msm_thermal_freq: qcom,vdd-apps-rstr {
+ qcom,vdd-rstr-reg = "vdd-apps";
+ qcom,levels = <1094400>;
+ qcom,freq-req;
+ };
+ };
+
+ qcom,bcl {
+ compatible = "qcom,bcl";
+ qcom,bcl-enable;
+ qcom,bcl-framework-interface;
+ qcom,bcl-freq-control-list = <&CPU0 &CPU1 &CPU2 &CPU3>;
+ qcom,bcl-hotplug-list = <&CPU2 &CPU3>;
+ qcom,bcl-soc-hotplug-list = <&CPU2 &CPU3>;
+ qcom,ibat-monitor {
+ qcom,low-threshold-uamp = <1000000>;
+ qcom,high-threshold-uamp = <2000000>;
+ qcom,mitigation-freq-khz = <1094400>;
+ qcom,vph-high-threshold-uv = <3500000>;
+ qcom,vph-low-threshold-uv = <3200000>;
+ qcom,soc-low-threshold = <10>;
+ qcom,thermal-handle = <&msm_thermal_freq>;
+ };
+ };
+
+ msm_digital_codec: msm-dig-codec@771c000 {
+ compatible = "qcom,msm-digital-codec";
+ reg = <0x0771c000 0x0>;
+ qcom,no-analog-codec;
+
+ cdc-vdd-digital-supply = <&pm660_l11>;
+ qcom,cdc-vdd-digital-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-digital-current = <10000>;
+ qcom,cdc-on-demand-supplies = "cdc-vdd-digital";
+
+ qcom,subsys-name = "modem";
+ };
+
+ mdss_spi: qcom,mdss_spi {
+ compatible = "qcom,mdss-spi-display";
+ label = "mdss spi panel";
+
+ qcom,mdss-fb-map = <&mdss_fb0>;
+ qcom,mdss-mdp = <&mdss_mdp>;
+ vdd-supply = <&pm660_l18>;
+ vddio-supply = <&pm660_l11>;
+
+ qcom,panel-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,panel-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdd";
+ qcom,supply-min-voltage = <3000000>;
+ qcom,supply-max-voltage = <3000000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+ qcom,panel-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "vddio";
+ qcom,supply-min-voltage = <1800000>;
+ qcom,supply-max-voltage = <3000000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+ };
+ };
+
+ cdc_dmic_gpios: cdc_dmic_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&cdc_dmic0_clk_act &cdc_dmic0_data_act>;
+ pinctrl-1 = <&cdc_dmic0_clk_sus &cdc_dmic0_data_sus>;
+ };
+
+ cdc_quat_mi2s_gpios: msm_cdc_pinctrl_quat {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&quat_mi2s_active &quat_mi2s_din_active>;
+ pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_din_sleep>;
+ };
+};
+
+&qcom_seecom {
+ reg = <0x87a00000 0x200000>;
+ reg-names = "secapp-region";
+ status = "okay";
+};
+
+&external_image_mem {
+ reg = <0x0 0x87a00000 0x0 0x0600000>;
+};
+
+&modem_adsp_mem {
+ reg = <0x0 0x88000000 0x0 0x05600000>;
+};
+
+&peripheral_mem {
+ reg = <0x0 0x8d600000 0x0 0x0600000>;
+};
+
+&i2c_1 {
+ status = "disabled";
+};
+
+&spi_0 {
+ status = "disabled";
+};
+
+&i2c_4 {
+ status = "disabled";
+};
+
+&i2c_2 {
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&msm_gpio 50 0x00>;
+ qcom,nq-ven = <&msm_gpio 52 0x00>;
+ qcom,nq-firm = <&msm_gpio 38 0x00>;
+ qcom,nq-esepwr = <&msm_gpio 49 0x00>;
+ qcom,nq-clkreq = <&pm660_gpios 4 0x00>;
+ qcom,clk-src = "BBCLK3";
+ interrupt-parent = <&msm_gpio>;
+ interrupts = <50 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active","nfc_suspend";
+ pinctrl-0 = <&nfcw_int_active &nfcv2k_disable_active>;
+ pinctrl-1 = <&nfcw_int_suspend &nfcv2k_disable_suspend>;
+ clocks = <&clock_rpm clk_bb_clk3_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
+&i2c_5 {
+ status = "disabled";
+};
+
+&sdhc_2 {
+ status = "disabled";
+};
+
+&blsp1_uart1 {
+ status = "disabled";
+};
+
+&blsp1_uart2 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console2_sleep>;
+};
+
+/* Pinctrl dt nodes for interrupt & reset gpio for Synaptics touch controller */
+&ts_int_active {
+ mux {
+ pins = "gpio98";
+ };
+
+ config {
+ pins = "gpio98";
+ };
+};
+
+&ts_int_suspend {
+ mux {
+ pins = "gpio98";
+ };
+
+ config {
+ pins = "gpio98";
+ };
+};
+
+&ts_reset_active {
+ mux {
+ pins = "gpio31";
+ };
+
+ config {
+ pins = "gpio31";
+ };
+};
+
+&ts_reset_suspend {
+ mux {
+ pins = "gpio31";
+ };
+
+ config {
+ pins = "gpio31";
+ };
+};
+
+&ts_release {
+ mux {
+ pins = "gpio98", "gpio31";
+ };
+
+ config {
+ pins = "gpio98", "gpio31";
+ };
+};
+
+&vendor_fstab {
+ fsmgr_flags = "wait";
+};
+
+&mdss_dsi0{
+ qcom,dsi-pref-prim-pan = <&dsi_auo_390p_cmd>;
+ qcom,platform-bklight-en-gpio = <&msm_gpio 37 0>;
+};
+
+&spi_st7789v2_qvga_cmd {
+ qcom,panel-supply-entries = <&dsi_pm660_panel_pwr_supply>;
+ qcom,mdss-spi-bl-pmic-pwm-frequency = <100>;
+ qcom,mdss-spi-bl-pmic-bank-select = <0>;
+ qcom,cont-splash-enabled;
+};
+
+&mdss_spi {
+ qcom,spi-pref-prim-pan = <&spi_st7789v2_qvga_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_te_active>;
+ pinctrl-1 = <&mdss_te_suspend>;
+
+ qcom,platform-te-gpio = <&msm_gpio 24 0>;
+ qcom,platform-reset-gpio = <&msm_gpio 25 0>;
+ qcom,platform-spi-dc-gpio = <&msm_gpio 59 0>;
+
+ vdd-supply = <&pm660_l18>;
+ vddio-supply = <&pm660_l11>;
+};
+
+&dai_mi2s3 {
+ qcom,msm-mi2s-rx-lines = <1>;
+ qcom,msm-mi2s-tx-lines = <2>;
+};
+
+&audio_codec_mtp {
+ compatible = "qcom,msm8909-audio-codec";
+ qcom,model = "msm8909w-wtp-snd-card";
+ qcom,msm-ext-pa = "quaternary";
+ qcom,split-a2dp;
+ qcom,audio-routing =
+ "CDC_CONN", "MCLK",
+ "QUAT_MI2S_RX", "DIGITAL_REGULATOR",
+ "TX_I2S_CLK", "DIGITAL_REGULATOR",
+ "DMIC1", "Digital Mic1",
+ "DMIC2", "Digital Mic2";
+ qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
+ qcom,quat-mi2s-gpios = <&cdc_quat_mi2s_gpios>;
+ qcom,msm-gpios =
+ "quat_i2s",
+ "dmic";
+ qcom,pinctrl-names =
+ "all_off",
+ "quat_i2s_act",
+ "dmic_act",
+ "quat_i2s_dmic_act";
+ pinctrl-names =
+ "all_off",
+ "quat_i2s_act",
+ "dmic_act",
+ "quat_i2s_dmic_act";
+ pinctrl-0 = <&quat_mi2s_sleep &quat_mi2s_din_sleep
+ &cdc_dmic0_clk_sus &cdc_dmic0_data_sus>;
+ pinctrl-1 = <&quat_mi2s_active &quat_mi2s_din_active
+ &cdc_dmic0_clk_sus &cdc_dmic0_data_sus>;
+ pinctrl-2 = <&quat_mi2s_sleep &quat_mi2s_din_sleep
+ &cdc_dmic0_clk_act &cdc_dmic0_data_act>;
+ pinctrl-3 = <&quat_mi2s_active &quat_mi2s_din_active
+ &cdc_dmic0_clk_act &cdc_dmic0_data_act>;
+ /delete-property/qcom,cdc-us-euro-gpios;
+
+ asoc-codec = <&stub_codec>, <&msm_digital_codec>;
+ asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec";
+};
diff --git a/arch/arm64/boot/dts/qcom/spi-panel-st7789v2-qvga-cmd.dtsi b/arch/arm64/boot/dts/qcom/spi-panel-st7789v2-qvga-cmd.dtsi
new file mode 100644
index 000000000000..a0d5c3617a9b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/spi-panel-st7789v2-qvga-cmd.dtsi
@@ -0,0 +1,67 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ spi_st7789v2_qvga_cmd: qcom,mdss_spi_st7789v2_qvga_cmd {
+ qcom,mdss-spi-panel-name =
+ "st7789v2 qvga command mode spi panel";
+ qcom,mdss-spi-panel-destination = "display_1";
+ qcom,mdss-spi-panel-controller = <&mdss_spi>;
+ qcom,mdss-spi-panel-framerate = <27>;
+ qcom,mdss-spi-panel-width = <240>;
+ qcom,mdss-spi-panel-height = <240>;
+ qcom,mdss-spi-h-front-porch = <79>;
+ qcom,mdss-spi-h-back-porch = <59>;
+ qcom,mdss-spi-h-pulse-width = <60>;
+ qcom,mdss-spi-v-back-porch = <10>;
+ qcom,mdss-spi-v-front-porch = <7>;
+ qcom,mdss-spi-v-pulse-width = <2>;
+ qcom,mdss-spi-h-left-border = <0>;
+ qcom,mdss-spi-h-right-border = <0>;
+ qcom,mdss-spi-v-top-border = <0>;
+ qcom,mdss-spi-v-bottom-border = <0>;
+ qcom,mdss-spi-bpp = <16>;
+ qcom,mdss-spi-on-command = [
+ 96 01 11
+ 00 02 36 00
+ 00 02 3A 05
+ 00 02 35 00
+ 00 06 B2 0C 0C 00 33 33
+ 00 02 B7 75
+ 00 02 BB 3D
+ 00 02 C2 01
+ 00 02 C3 19
+ 00 02 04 20
+ 00 02 C6 0F
+ 00 03 D0 A4 A1
+ 00 0F E0 70 04 08 09 09 05 2A 33
+ 41 07 13 13 29 2F
+ 00 0F E1 70 03 09 0A 09 06 2B 34
+ 41 07 12 14 28 2E
+ 00 01 21
+ 00 01 29
+ 00 05 2A 00 00 00 EF
+ 00 05 2B 00 00 00 EF
+ 00 01 2C];
+ qcom,mdss-spi-off-command = [20 01 28
+ 20 01 10];
+ qcom,mdss-spi-bl-min-level = <1>;
+ qcom,mdss-spi-bl-max-level = <255>;
+ qcom,mdss-spi-bl-pmic-control-type = "bl_gpio_pulse";
+ qcom,mdss-spi-reset-sequence = <1 20>, <0 1>, <1 20>;
+ qcom,mdss-spi-panel-status-check-mode = "reg_read";
+ qcom,mdss-spi-panel-status-reg = /bits/ 8 <0x0a>;
+ qcom,mdss-spi-panel-status-read-length = <1>;
+ qcom,mdss-spi-panel-max-error-count = <1>;
+ qcom,mdss-spi-panel-status-value = /bits/ 8 <0x9c>;
+ };
+};
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 9afd9a9ca23a..6631e205c783 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -52,3 +52,11 @@ config REGMAP_ALLOW_WRITE_DEBUGFS
Say 'y' here to allow the regmap debugfs write. Regmap debugfs write
could be risky when accessing some essential hardwares, so it is not
recommended to enable this option on any production device.
+
+config REGMAP_SND_DIGCDC
+ depends on SND_SOC
+ bool "For regmap on DIGCDC"
+ select REGMAP_MMIO
+ default n
+ help
+ Say 'y' here to enable regmap_mmio for digital codec.
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index a089e7c552e2..0937b2f14d77 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2303,8 +2303,8 @@ struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
pid_struct = find_get_pid(entry->tgid);
if (!pid_struct) {
DIAG_LOG(DIAG_DEBUG_DCI,
- "diag: valid pid doesn't exist for pid = %d\n",
- entry->tgid);
+ "diag: Exited pid (%d) doesn't match dci client of pid (%d)\n",
+ tgid, entry->tgid);
continue;
}
task_s = get_pid_task(pid_struct, PIDTYPE_PID);
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 41ab3386f138..8d53a06c62a3 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -1773,6 +1773,7 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
mask_info->update_buf = kzalloc(update_buf_len, GFP_KERNEL);
if (!mask_info->update_buf) {
kfree(mask_info->ptr);
+ mask_info->ptr = NULL;
return -ENOMEM;
}
kmemleak_not_leak(mask_info->update_buf);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index c6136677c6d2..8adaf6478acb 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -552,8 +552,8 @@ static int diagchar_close(struct inode *inode, struct file *file)
{
int ret;
- DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
- current->comm);
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: %s process exit with pid = %d\n",
+ current->comm, current->tgid);
ret = diag_remove_client_entry(file);
return ret;
@@ -3262,6 +3262,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
int exit_stat = 0;
int write_len = 0;
struct diag_md_session_t *session_info = NULL;
+ struct pid *pid_struct = NULL;
+ struct task_struct *task_s = NULL;
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
@@ -3506,8 +3508,19 @@ exit:
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl,
track);
- if (entry->client->tgid != current->tgid)
+ pid_struct = find_get_pid(entry->tgid);
+ if (!pid_struct)
continue;
+ task_s = get_pid_task(pid_struct, PIDTYPE_PID);
+ if (!task_s) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "diag: valid task doesn't exist for pid = %d\n",
+ entry->tgid);
+ continue;
+ }
+ if (task_s == entry->client)
+ if (entry->client->tgid != current->tgid)
+ continue;
if (!entry->in_service)
continue;
if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index e46f65671dec..da6cb173713a 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -144,26 +144,15 @@ static void uhid_hid_stop(struct hid_device *hid)
static int uhid_hid_open(struct hid_device *hid)
{
struct uhid_device *uhid = hid->driver_data;
- int retval = 0;
- mutex_lock(&uhid_open_mutex);
- if (!hid->open++) {
- retval = uhid_queue_event(uhid, UHID_OPEN);
- if (retval)
- hid->open--;
- }
- mutex_unlock(&uhid_open_mutex);
- return retval;
+ return uhid_queue_event(uhid, UHID_OPEN);
}
static void uhid_hid_close(struct hid_device *hid)
{
struct uhid_device *uhid = hid->driver_data;
- mutex_lock(&uhid_open_mutex);
- if (!--hid->open)
- uhid_queue_event(uhid, UHID_CLOSE);
- mutex_unlock(&uhid_open_mutex);
+ uhid_queue_event(uhid, UHID_CLOSE);
}
static int uhid_hid_parse(struct hid_device *hid)
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index 2d1cb6e39951..b4f0dbf3d6d2 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -34,6 +34,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/thermal.h>
+#include <linux/qpnp/qpnp-revid.h>
/* QPNP VADC register definition */
#define QPNP_VADC_REVISION1 0x0
@@ -203,6 +204,7 @@ struct qpnp_vadc_chip {
struct power_supply *vadc_chg_vote;
bool vadc_hc;
int vadc_debug_count;
+ struct pmic_revid_data *pmic_rev_id;
struct sensor_device_attribute sens_attr[0];
};
@@ -466,6 +468,44 @@ static void qpnp_vadc_hc_update_adc_dig_param(struct qpnp_vadc_chip *vadc,
pr_debug("VADC_DIG_PARAM value:0x%x\n", *data);
}
+static int qpnp_vadc_channel_check(struct qpnp_vadc_chip *vadc, u8 buf)
+{
+ int rc = 0;
+ u8 chno = 0;
+
+ rc = qpnp_vadc_read_reg(vadc,
+ QPNP_VADC_HC1_ADC_CH_SEL_CTL, &chno, 1);
+ if (rc < 0) {
+ pr_err("Channel reread failed\n");
+ return rc;
+ }
+
+ if (buf != chno) {
+ pr_debug("channel write fails once: written:0x%x actual:0x%x\n",
+ chno, buf);
+
+ rc = qpnp_vadc_write_reg(vadc,
+ QPNP_VADC_HC1_ADC_CH_SEL_CTL, &buf, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc register configure failed\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_read_reg(vadc,
+ QPNP_VADC_HC1_ADC_CH_SEL_CTL, &chno, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc configure read failed\n");
+ return rc;
+ }
+
+ if (chno != buf) {
+ pr_err("Write fails twice: written: 0x%x\n", chno);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int qpnp_vadc_hc_pre_configure_usb_in(struct qpnp_vadc_chip *vadc,
int dt_index)
{
@@ -473,6 +513,11 @@ static int qpnp_vadc_hc_pre_configure_usb_in(struct qpnp_vadc_chip *vadc,
u8 buf;
u8 dig_param = 0;
struct qpnp_adc_amux_properties conv;
+ bool channel_check = false;
+
+ if (vadc->pmic_rev_id)
+ if (vadc->pmic_rev_id->pmic_subtype == PMI632_SUBTYPE)
+ channel_check = true;
/* Setup dig params for USB_IN_V */
conv.decimation = DECIMATION_TYPE2;
@@ -497,6 +542,12 @@ static int qpnp_vadc_hc_pre_configure_usb_in(struct qpnp_vadc_chip *vadc,
if (rc < 0)
return rc;
+ if (channel_check) {
+ rc = qpnp_vadc_channel_check(vadc, buf);
+ if (rc)
+ return rc;
+ }
+
buf = QPNP_VADC_HC1_ADC_EN;
rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_EN_CTL1, &buf, 1);
if (rc < 0)
@@ -521,6 +572,12 @@ static int qpnp_vadc_hc_pre_configure_usb_in(struct qpnp_vadc_chip *vadc,
if (rc < 0)
return rc;
+ if (channel_check) {
+ rc = qpnp_vadc_channel_check(vadc, buf);
+ if (rc)
+ return rc;
+ }
+
/* Wait for GND read to complete */
rc = qpnp_vadc_wait_for_eoc(vadc);
if (rc < 0)
@@ -542,10 +599,16 @@ static int qpnp_vadc_hc_configure(struct qpnp_vadc_chip *vadc,
struct qpnp_adc_amux_properties *amux_prop)
{
int rc = 0;
- u8 buf[6];
+ u8 buf[5];
+ u8 conv_req = 0;
+ bool channel_check = false;
+
+ if (vadc->pmic_rev_id)
+ if (vadc->pmic_rev_id->pmic_subtype == PMI632_SUBTYPE)
+ channel_check = true;
/* Read registers 0x42 through 0x46 */
- rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 5);
if (rc < 0) {
pr_err("qpnp adc configure block read failed\n");
return rc;
@@ -569,7 +632,7 @@ static int qpnp_vadc_hc_configure(struct qpnp_vadc_chip *vadc,
buf[4] |= QPNP_VADC_HC1_ADC_EN;
/* Select CONV request */
- buf[5] |= QPNP_VADC_HC1_CONV_REQ_START;
+ conv_req = QPNP_VADC_HC1_CONV_REQ_START;
if (!vadc->vadc_poll_eoc)
reinit_completion(&vadc->adc->adc_rslt_completion);
@@ -578,7 +641,20 @@ static int qpnp_vadc_hc_configure(struct qpnp_vadc_chip *vadc,
buf[0], buf[1], buf[2], buf[3]);
/* Block register write from 0x42 through 0x46 */
- rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 5);
+ if (rc < 0) {
+ pr_err("qpnp adc block register configure failed\n");
+ return rc;
+ }
+
+ if (channel_check) {
+ rc = qpnp_vadc_channel_check(vadc, buf[2]);
+ if (rc)
+ return rc;
+ }
+
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_CONV_REQ,
+ &conv_req, 1);
if (rc < 0) {
pr_err("qpnp adc block register configure failed\n");
return rc;
@@ -2712,7 +2788,7 @@ static int qpnp_vadc_probe(struct platform_device *pdev)
struct qpnp_vadc_chip *vadc;
struct qpnp_adc_drv *adc_qpnp;
struct qpnp_vadc_thermal_data *adc_thermal;
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node = pdev->dev.of_node, *revid_dev_node;
struct device_node *child;
const struct of_device_id *id;
int rc, count_adc_channel_list = 0, i = 0;
@@ -2766,6 +2842,16 @@ static int qpnp_vadc_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+ if (revid_dev_node) {
+ vadc->pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR(vadc->pmic_rev_id)) {
+ pr_err("Unable to get revid\n");
+ vadc->pmic_rev_id = NULL;
+ }
+ of_node_put(revid_dev_node);
+ }
+
vadc->vadc_therm_chan = adc_thermal;
if (!strcmp(id->compatible, "qcom,qpnp-vadc-hc")) {
vadc->vadc_hc = true;
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 25b85ab146a2..2dcac176812c 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1349,6 +1349,7 @@ static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
struct iommu_debug_device *ddev = file->private_data;
struct device *dev = ddev->dev;
char c[2];
+ size_t buflen = sizeof(c);
if (*offset)
return 0;
@@ -1359,13 +1360,14 @@ static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
c[0] = dev->archdata.mapping->domain ? '1' : '0';
c[1] = '\n';
- if (copy_to_user(ubuf, &c, 2)) {
+ buflen = min(count, buflen);
+ if (copy_to_user(ubuf, &c, buflen)) {
pr_err("copy_to_user failed\n");
return -EFAULT;
}
*offset = 1; /* non-zero means we're done */
- return 2;
+ return buflen;
}
static const struct file_operations iommu_debug_dma_attach_fops = {
@@ -1393,7 +1395,7 @@ static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
else
snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);
- buflen = strlen(buf);
+ buflen = min(count, strlen(buf)+1);
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
retval = -EFAULT;
@@ -1424,19 +1426,21 @@ static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
{
struct iommu_debug_device *ddev = file->private_data;
char c[2];
+ size_t buflen = sizeof(c);
if (*offset)
return 0;
c[0] = ddev->domain ? '1' : '0';
c[1] = '\n';
- if (copy_to_user(ubuf, &c, 2)) {
+ buflen = min(count, buflen);
+ if (copy_to_user(ubuf, &c, buflen)) {
pr_err("copy_to_user failed\n");
return -EFAULT;
}
*offset = 1; /* non-zero means we're done */
- return 2;
+ return buflen;
}
static const struct file_operations iommu_debug_attach_fops = {
@@ -1514,7 +1518,7 @@ static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
else
snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);
- buflen = strlen(buf);
+ buflen = min(count, strlen(buf)+1);
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
retval = -EFAULT;
@@ -1583,7 +1587,7 @@ static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
snprintf(buf, 100, "%pa\n", &phys);
}
- buflen = strlen(buf);
+ buflen = min(count, strlen(buf)+1);
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
retval = -EFAULT;
@@ -1636,7 +1640,7 @@ static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
else
snprintf(buf, sizeof(buf), "%pa\n", &phys);
- buflen = strlen(buf);
+ buflen = min(count, strlen(buf)+1);
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
retval = -EFAULT;
@@ -1869,7 +1873,7 @@ static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
iova = ddev->iova;
snprintf(buf, sizeof(buf), "%pa\n", &iova);
- buflen = strlen(buf);
+ buflen = min(count, strlen(buf)+1);
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
retval = -EFAULT;
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
index 9e4ea8d64f86..04675a4b158d 100644
--- a/drivers/media/platform/msm/Kconfig
+++ b/drivers/media/platform/msm/Kconfig
@@ -48,6 +48,21 @@ if MSMB_CAMERA
source "drivers/media/platform/msm/camera_v2/Kconfig"
endif # MSMB_CAMERA
+menuconfig SPECTRA2_CAMERA
+ bool "Qualcomm Technologies, Inc. Spectra camera and video capture support"
+ depends on SPECTRA_CAMERA
+ ---help---
+ Say Y here to enable selecting the video adapters for
+ QTI msm camera and video capture 2.1, enabling this
+ adds support for the camera driver stack including sensor, isp
+ and postprocessing drivers.
+
+config SPECTRA2_CAMERA_DEBUG
+ bool "QTI MSM camera debugging with printk"
+ depends on SPECTRA2_CAMERA
+ ---help---
+ Enable printk() debug for msm camera 2.1
+
source "drivers/media/platform/msm/vidc_3x/Kconfig"
source "drivers/media/platform/msm/vidc/Kconfig"
source "drivers/media/platform/msm/sde/Kconfig"
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
index e19da45e87f8..62d9b5452df7 100644
--- a/drivers/media/platform/msm/Makefile
+++ b/drivers/media/platform/msm/Makefile
@@ -5,7 +5,11 @@
obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/
obj-$(CONFIG_MSM_VIDC_3X_V4L2) += vidc_3x/
obj-y += sde/
+ifeq ($(CONFIG_SPECTRA2_CAMERA), y)
+obj-$(CONFIG_SPECTRA_CAMERA) += camera_v3/
+else
obj-$(CONFIG_SPECTRA_CAMERA) += camera/
+endif
obj-$(CONFIG_MSMB_CAMERA) += camera_v2/
obj-y += broadcast/
obj-$(CONFIG_DVB_MPQ) += dvb/
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 7c55ad8dc0d4..a62c465e0392 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -450,7 +450,7 @@ static int isp_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page;
struct vfe_device *vfe_dev = vma->vm_private_data;
- struct isp_proc *isp_page = NULL;
+ struct isp_kstate *isp_page = NULL;
isp_page = vfe_dev->isp_page;
@@ -728,7 +728,7 @@ int vfe_hw_probe(struct platform_device *pdev)
vfe_dev->buf_mgr->init_done = 1;
vfe_dev->vfe_open_cnt = 0;
/*Allocate a page in kernel and map it to camera user process*/
- vfe_dev->isp_page = (struct isp_proc *)get_zeroed_page(GFP_KERNEL);
+ vfe_dev->isp_page = (struct isp_kstate *)get_zeroed_page(GFP_KERNEL);
if (vfe_dev->isp_page == NULL) {
pr_err("%s: no enough memory\n", __func__);
rc = -ENOMEM;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 7697392d3ce3..63287925eead 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -760,11 +760,6 @@ struct msm_vfe_common_subdev {
struct msm_vfe_common_dev_data *common_data;
};
-struct isp_proc {
- uint32_t kernel_sofid;
- uint32_t vfeid;
-};
-
struct vfe_device {
/* Driver private data */
struct platform_device *pdev;
@@ -849,7 +844,7 @@ struct vfe_device {
uint32_t recovery_irq1_mask;
/* total bandwidth per vfe */
uint64_t total_bandwidth;
- struct isp_proc *isp_page;
+ struct isp_kstate *isp_page;
};
struct vfe_parent_device {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index d4068be758f8..a2b32ae9513f 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -746,13 +746,13 @@ static void msm_vfe40_process_epoch_irq(struct vfe_device *vfe_dev,
return;
if (irq_status0 & BIT(2)) {
- msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
MSM_ISP_COMP_IRQ_EPOCH, ts);
msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
MSM_ISP_COMP_IRQ_EPOCH);
msm_isp_update_error_frame_count(vfe_dev);
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
stream_count == 0) {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 1b04e1d3b5f1..1ddbb94d66b9 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -175,6 +175,8 @@ static void msm_isp_axi_destroy_stream(
stream_info->bufq_handle[k] = 0;
stream_info->vfe_mask = 0;
stream_info->state = AVAILABLE;
+ memset(&stream_info->request_queue_cmd,
+ 0, sizeof(stream_info->request_queue_cmd));
}
}
@@ -598,7 +600,8 @@ static int msm_isp_composite_irq(struct vfe_device *vfe_dev,
*
* Returns void
*/
-static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info)
+static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info,
+ uint32_t drop_reconfig)
{
if (stream_info->stream_type == BURST_STREAM) {
if (stream_info->runtime_num_burst_capture == 0 ||
@@ -608,7 +611,8 @@ static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info)
MSM_VFE_STREAM_STOP_PERIOD;
}
- if (stream_info->undelivered_request_cnt > 0)
+ if (stream_info->undelivered_request_cnt > 0 &&
+ drop_reconfig != 1)
stream_info->current_framedrop_period =
MSM_VFE_STREAM_STOP_PERIOD;
@@ -663,7 +667,8 @@ void msm_isp_process_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
break;
case MSM_ISP_COMP_IRQ_EPOCH:
if (stream_info->state == ACTIVE)
- msm_isp_update_framedrop_reg(stream_info);
+ msm_isp_update_framedrop_reg(stream_info,
+ vfe_dev->isp_page->drop_reconfig);
break;
default:
WARN(1, "Invalid irq %d\n", irq);
@@ -3543,6 +3548,14 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
frame_src = SRC_TO_INTF(stream_info->stream_src);
pingpong_status = vfe_dev->hw_info->
vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
+
+ /* As MCT is still processing it, need to drop the additional requests*/
+ if (vfe_dev->isp_page->drop_reconfig) {
+ pr_err("%s: MCT has not yet delayed %d drop request %d\n",
+ __func__, vfe_dev->isp_page->drop_reconfig, frame_id);
+ goto error;
+ }
+
/*
* If PIX stream is active then RDI path uses SOF frame ID of PIX
* In case of standalone RDI streaming, SOF are used from
@@ -3556,9 +3569,18 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
vfe_dev->axi_data.src_info[frame_src].accept_frame == false) {
pr_debug("%s:%d invalid time to request frame %d\n",
__func__, __LINE__, frame_id);
- goto error;
- }
- if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id !=
+ vfe_dev->isp_page->drop_reconfig = 1;
+ } else if ((vfe_dev->axi_data.src_info[frame_src].active) &&
+ (frame_id ==
+ vfe_dev->axi_data.src_info[frame_src].frame_id) &&
+ (stream_info->undelivered_request_cnt <=
+ MAX_BUFFERS_IN_HW)) {
+ vfe_dev->isp_page->drop_reconfig = 1;
+ pr_debug("%s: vfe_%d request_frame %d cur frame id %d pix %d\n",
+ __func__, vfe_dev->pdev->id, frame_id,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active);
+ } else if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id !=
vfe_dev->axi_data.src_info[frame_src].frame_id + vfe_dev->
axi_data.src_info[frame_src].sof_counter_step)) ||
((!vfe_dev->axi_data.src_info[frame_src].active))) {
@@ -3662,6 +3684,9 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
stream_info->undelivered_request_cnt--;
pr_err_ratelimited("%s:%d fail to cfg HAL buffer\n",
__func__, __LINE__);
+ queue_req->cmd_used = 0;
+ list_del(&queue_req->list);
+ stream_info->request_q_cnt--;
return rc;
}
@@ -3698,6 +3723,9 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
flags);
pr_err_ratelimited("%s:%d fail to cfg HAL buffer\n",
__func__, __LINE__);
+ queue_req->cmd_used = 0;
+ list_del(&queue_req->list);
+ stream_info->request_q_cnt--;
return rc;
}
} else {
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 512fdb916127..652b0c3868f6 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -212,32 +212,34 @@ static long msm_ispif_cmd_ext(struct v4l2_subdev *sd,
long rc = 0;
struct ispif_device *ispif =
(struct ispif_device *)v4l2_get_subdevdata(sd);
- struct ispif_cfg_data_ext pcdata;
+ struct ispif_cfg_data_ext pcdata = {0};
struct msm_ispif_param_data_ext *params = NULL;
-#ifdef CONFIG_COMPAT
- struct ispif_cfg_data_ext_32 *pcdata32 =
- (struct ispif_cfg_data_ext_32 *)arg;
- if (pcdata32 == NULL) {
- pr_err("Invalid params passed from user\n");
- return -EINVAL;
- }
- pcdata.cfg_type = pcdata32->cfg_type;
- pcdata.size = pcdata32->size;
- pcdata.data = compat_ptr(pcdata32->data);
+ if (is_compat_task()) {
+#ifdef CONFIG_COMPAT
+ struct ispif_cfg_data_ext_32 *pcdata32 =
+ (struct ispif_cfg_data_ext_32 *)arg;
-#else
- struct ispif_cfg_data_ext *pcdata64 =
+ if (pcdata32 == NULL) {
+ pr_err("Invalid params passed from user\n");
+ return -EINVAL;
+ }
+ pcdata.cfg_type = pcdata32->cfg_type;
+ pcdata.size = pcdata32->size;
+ pcdata.data = compat_ptr(pcdata32->data);
+#endif
+ } else {
+ struct ispif_cfg_data_ext *pcdata64 =
(struct ispif_cfg_data_ext *)arg;
- if (pcdata64 == NULL) {
- pr_err("Invalid params passed from user\n");
- return -EINVAL;
+ if (pcdata64 == NULL) {
+ pr_err("Invalid params passed from user\n");
+ return -EINVAL;
+ }
+ pcdata.cfg_type = pcdata64->cfg_type;
+ pcdata.size = pcdata64->size;
+ pcdata.data = pcdata64->data;
}
- pcdata.cfg_type = pcdata64->cfg_type;
- pcdata.size = pcdata64->size;
- pcdata.data = pcdata64->data;
-#endif
if (pcdata.size != sizeof(struct msm_ispif_param_data_ext)) {
pr_err("%s: payload size mismatch\n", __func__);
return -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v3/Makefile b/drivers/media/platform/msm/camera_v3/Makefile
new file mode 100644
index 000000000000..9e0aee9f69e3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_module/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme/
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/Makefile b/drivers/media/platform/msm/camera_v3/cam_cdm/Makefile
new file mode 100644
index 000000000000..e04f3054ad43
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm_soc.o cam_cdm_util.o cam_cdm_intf.o \
+ cam_cdm_core_common.o cam_cdm_virtual_core.o \
+ cam_cdm_hw_core.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm.h
new file mode 100644
index 000000000000..ff8be3570bc5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm.h
@@ -0,0 +1,261 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_H_
+#define _CAM_CDM_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_soc_util.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_intf.h"
+#include "cam_hw.h"
+#include "cam_debug_util.h"
+
+#define CAM_MAX_SW_CDM_VERSION_SUPPORTED 1
+#define CAM_SW_CDM_INDEX 0
+#define CAM_CDM_INFLIGHT_WORKS 5
+#define CAM_CDM_HW_RESET_TIMEOUT 300
+
+#define CAM_CDM_HW_ID_MASK 0xF
+#define CAM_CDM_HW_ID_SHIFT 0x5
+#define CAM_CDM_CLIENTS_ID_MASK 0x1F
+
+#define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \
+ CAM_CDM_HW_ID_MASK)
+#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, client_idx) \
+ ((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \
+ ((client_idx) & CAM_CDM_CLIENTS_ID_MASK))
+#define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK)
+#define CAM_PER_CDM_MAX_REGISTERED_CLIENTS (CAM_CDM_CLIENTS_ID_MASK + 1)
+#define CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM (CAM_CDM_HW_ID_MASK + 1)
+
+/* enum cam_cdm_reg_attr - read, write, read and write permissions.*/
+enum cam_cdm_reg_attr {
+ CAM_REG_ATTR_READ,
+ CAM_REG_ATTR_WRITE,
+ CAM_REG_ATTR_READ_WRITE,
+};
+
+/* enum cam_cdm_hw_process_intf_cmd - interface commands.*/
+enum cam_cdm_hw_process_intf_cmd {
+ CAM_CDM_HW_INTF_CMD_ACQUIRE,
+ CAM_CDM_HW_INTF_CMD_RELEASE,
+ CAM_CDM_HW_INTF_CMD_SUBMIT_BL,
+ CAM_CDM_HW_INTF_CMD_RESET_HW,
+ CAM_CDM_HW_INTF_CMD_INVALID,
+};
+
+/* enum cam_cdm_regs - CDM driver offset enums.*/
+enum cam_cdm_regs {
+ /*cfg_offsets 0*/
+ CDM_CFG_HW_VERSION,
+ CDM_CFG_TITAN_VERSION,
+ CDM_CFG_RST_CMD,
+ CDM_CFG_CGC_CFG,
+ CDM_CFG_CORE_CFG,
+ CDM_CFG_CORE_EN,
+ CDM_CFG_FE_CFG,
+ /*irq_offsets 7*/
+ CDM_IRQ_MASK,
+ CDM_IRQ_CLEAR,
+ CDM_IRQ_CLEAR_CMD,
+ CDM_IRQ_SET,
+ CDM_IRQ_SET_CMD,
+ CDM_IRQ_STATUS,
+ CDM_IRQ_USR_DATA,
+ /*BL FIFO Registers 14*/
+ CDM_BL_FIFO_BASE_REG,
+ CDM_BL_FIFO_LEN_REG,
+ CDM_BL_FIFO_STORE_REG,
+ CDM_BL_FIFO_CFG,
+ CDM_BL_FIFO_RB,
+ CDM_BL_FIFO_BASE_RB,
+ CDM_BL_FIFO_LEN_RB,
+ CDM_BL_FIFO_PENDING_REQ_RB,
+ /*CDM System Debug Registers 22*/
+ CDM_DBG_WAIT_STATUS,
+ CDM_DBG_SCRATCH_0_REG,
+ CDM_DBG_SCRATCH_1_REG,
+ CDM_DBG_SCRATCH_2_REG,
+ CDM_DBG_SCRATCH_3_REG,
+ CDM_DBG_SCRATCH_4_REG,
+ CDM_DBG_SCRATCH_5_REG,
+ CDM_DBG_SCRATCH_6_REG,
+ CDM_DBG_SCRATCH_7_REG,
+ CDM_DBG_LAST_AHB_ADDR,
+ CDM_DBG_LAST_AHB_DATA,
+ CDM_DBG_CORE_DBUG,
+ CDM_DBG_LAST_AHB_ERR_ADDR,
+ CDM_DBG_LAST_AHB_ERR_DATA,
+ CDM_DBG_CURRENT_BL_BASE,
+ CDM_DBG_CURRENT_BL_LEN,
+ CDM_DBG_CURRENT_USED_AHB_BASE,
+ CDM_DBG_DEBUG_STATUS,
+ /*FE Bus Miser Registers 40*/
+ CDM_BUS_MISR_CFG_0,
+ CDM_BUS_MISR_CFG_1,
+ CDM_BUS_MISR_RD_VAL,
+ /*Performance Counter registers 43*/
+ CDM_PERF_MON_CTRL,
+ CDM_PERF_MON_0,
+ CDM_PERF_MON_1,
+ CDM_PERF_MON_2,
+ /*Spare registers 47*/
+ CDM_SPARE,
+};
+
+/* struct cam_cdm_reg_offset - struct for offset with attribute.*/
+struct cam_cdm_reg_offset {
+ uint32_t offset;
+ enum cam_cdm_reg_attr attribute;
+};
+
+/* struct cam_cdm_reg_offset_table - struct for whole offset table.*/
+struct cam_cdm_reg_offset_table {
+ uint32_t first_offset;
+ uint32_t last_offset;
+ uint32_t reg_count;
+ const struct cam_cdm_reg_offset *offsets;
+ uint32_t offset_max_size;
+};
+
+/* enum cam_cdm_flags - Bit fields for CDM flags used */
+enum cam_cdm_flags {
+ CAM_CDM_FLAG_SHARED_CDM,
+ CAM_CDM_FLAG_PRIVATE_CDM,
+};
+
+/* enum cam_cdm_type - Enum for possible CAM CDM types */
+enum cam_cdm_type {
+ CAM_VIRTUAL_CDM,
+ CAM_HW_CDM,
+};
+
+/* enum cam_cdm_mem_base_index - Enum for possible CAM CDM types */
+enum cam_cdm_mem_base_index {
+ CAM_HW_CDM_BASE_INDEX,
+ CAM_HW_CDM_MAX_INDEX = CAM_SOC_MAX_BLOCK,
+};
+
+/* struct cam_cdm_client - struct for cdm clients data.*/
+struct cam_cdm_client {
+ struct cam_cdm_acquire_data data;
+ void __iomem *changebase_addr;
+ uint32_t stream_on;
+ uint32_t refcount;
+ struct mutex lock;
+ uint32_t handle;
+};
+
+/* struct cam_cdm_work_payload - struct for cdm work payload data.*/
+struct cam_cdm_work_payload {
+ struct cam_hw_info *hw;
+ uint32_t irq_status;
+ uint32_t irq_data;
+ struct work_struct work;
+};
+
+/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */
+enum cam_cdm_bl_cb_type {
+ CAM_HW_CDM_BL_CB_CLIENT = 1,
+ CAM_HW_CDM_BL_CB_INTERNAL,
+};
+
+/* struct cam_cdm_bl_cb_request_entry - callback entry for work to process.*/
+struct cam_cdm_bl_cb_request_entry {
+ uint8_t bl_tag;
+ enum cam_cdm_bl_cb_type request_type;
+ uint32_t client_hdl;
+ void *userdata;
+ uint32_t cookie;
+ struct list_head entry;
+};
+
+/* struct cam_cdm_hw_intf_cmd_submit_bl - cdm interface submit command.*/
+struct cam_cdm_hw_intf_cmd_submit_bl {
+ uint32_t handle;
+ struct cam_cdm_bl_request *data;
+};
+
+/* struct cam_cdm_hw_mem - CDM hw memory struct */
+struct cam_cdm_hw_mem {
+ int32_t handle;
+ uint32_t vaddr;
+ uintptr_t kmdvaddr;
+ size_t size;
+};
+
+/* struct cam_cdm - CDM hw device struct */
+struct cam_cdm {
+ uint32_t index;
+ char name[128];
+ enum cam_cdm_id id;
+ enum cam_cdm_flags flags;
+ struct completion reset_complete;
+ struct completion bl_complete;
+ struct workqueue_struct *work_queue;
+ struct list_head bl_request_list;
+ struct cam_hw_version version;
+ uint32_t hw_version;
+ uint32_t hw_family_version;
+ struct cam_iommu_handle iommu_hdl;
+ struct cam_cdm_reg_offset_table *offset_tbl;
+ struct cam_cdm_utils_ops *ops;
+ struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+ uint8_t bl_tag;
+ atomic_t error;
+ atomic_t bl_done;
+ struct cam_cdm_hw_mem gen_irq;
+ uint32_t cpas_handle;
+};
+
+/* struct cam_cdm_private_dt_data - CDM hw custom dt data */
+struct cam_cdm_private_dt_data {
+ bool dt_cdm_shared;
+ uint32_t dt_num_supported_clients;
+ const char *dt_cdm_client_name[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+};
+
+/* struct cam_cdm_intf_devices - CDM mgr interface devices */
+struct cam_cdm_intf_devices {
+ struct mutex lock;
+ uint32_t refcount;
+ struct cam_hw_intf *device;
+ struct cam_cdm_private_dt_data *data;
+};
+
+/* struct cam_cdm_intf_mgr - CDM mgr interface device struct */
+struct cam_cdm_intf_mgr {
+ bool probe_done;
+ struct cam_cdm_intf_devices nodes[CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM];
+ uint32_t cdm_count;
+ uint32_t dt_supported_hw_cdm;
+ int32_t refcount;
+};
+
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+ struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+ uint32_t *index);
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+ struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+ uint32_t index);
+
+#endif /* _CAM_CDM_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_core_common.c
new file mode 100644
index 000000000000..506de5e88309
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_core_common.c
@@ -0,0 +1,582 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_io_util.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_soc.h"
+#include "cam_cdm_core_common.h"
+
+static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
+{
+ mutex_lock(&client->lock);
+ CAM_DBG(CAM_CDM, "CDM client get refcount=%d",
+ client->refcount);
+ client->refcount++;
+ mutex_unlock(&client->lock);
+}
+
+static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
+{
+ mutex_lock(&client->lock);
+ CAM_DBG(CAM_CDM, "CDM client put refcount=%d",
+ client->refcount);
+ if (client->refcount > 0) {
+ client->refcount--;
+ } else {
+ CAM_ERR(CAM_CDM, "Refcount put when zero");
+ WARN_ON(1);
+ }
+ mutex_unlock(&client->lock);
+}
+
+bool cam_cdm_set_cam_hw_version(
+ uint32_t ver, struct cam_hw_version *cam_version)
+{
+ switch (ver) {
+ case CAM_CDM170_VERSION:
+ case CAM_CDM175_VERSION:
+ cam_version->major = (ver & 0xF0000000);
+ cam_version->minor = (ver & 0xFFF0000);
+ cam_version->incr = (ver & 0xFFFF);
+ cam_version->reserved = 0;
+ return true;
+ default:
+ CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", ver);
+ break;
+ }
+ return false;
+}
+
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
+{
+ if (!irq_data)
+ return false;
+
+ CAM_DBG(CAM_CDM, "CPAS error callback type=%d", irq_data->irq_type);
+
+ return false;
+}
+
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+ uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version)
+{
+ if (by_cam_version == false) {
+ switch (ver) {
+ case CAM_CDM170_VERSION:
+ case CAM_CDM175_VERSION:
+ return &CDM170_ops;
+ default:
+ CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util",
+ ver);
+ }
+ } else if (cam_version) {
+ if (((cam_version->major == 1) &&
+ (cam_version->minor == 0) &&
+ (cam_version->incr == 0)) ||
+ ((cam_version->major == 1) &&
+ (cam_version->minor == 1) &&
+ (cam_version->incr == 0))) {
+
+ CAM_DBG(CAM_CDM,
+ "cam_hw_version=%x:%x:%x supported",
+ cam_version->major, cam_version->minor,
+ cam_version->incr);
+ return &CDM170_ops;
+ }
+
+ CAM_ERR(CAM_CDM, "cam_hw_version=%x:%x:%x not supported",
+ cam_version->major, cam_version->minor,
+ cam_version->incr);
+ }
+
+ return NULL;
+}
+
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+ uint32_t tag, struct list_head *bl_list)
+{
+ struct cam_cdm_bl_cb_request_entry *node;
+
+ list_for_each_entry(node, bl_list, entry) {
+ if (node->bl_tag == tag)
+ return node;
+ }
+ CAM_ERR(CAM_CDM, "Could not find the bl request for tag=%x", tag);
+
+ return NULL;
+}
+
+int cam_cdm_get_caps(void *hw_priv,
+ void *get_hw_cap_args, uint32_t arg_size)
+{
+ struct cam_hw_info *cdm_hw = hw_priv;
+ struct cam_cdm *cdm_core;
+
+ if ((cdm_hw) && (cdm_hw->core_info) && (get_hw_cap_args) &&
+ (sizeof(struct cam_iommu_handle) == arg_size)) {
+ cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+ *((struct cam_iommu_handle *)get_hw_cap_args) =
+ cdm_core->iommu_hdl;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int cam_cdm_find_free_client_slot(struct cam_cdm *hw)
+{
+ int i;
+
+ for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+ if (hw->clients[i] == NULL) {
+ CAM_DBG(CAM_CDM, "Found client slot %d", i);
+ return i;
+ }
+ }
+ CAM_ERR(CAM_CDM, "No more client slots");
+
+ return -EBUSY;
+}
+
+
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+ enum cam_cdm_cb_status status, void *data)
+{
+ int i;
+ struct cam_cdm *core = NULL;
+ struct cam_cdm_client *client = NULL;
+
+ if (!cdm_hw) {
+ CAM_ERR(CAM_CDM, "CDM Notify called with NULL hw info");
+ return;
+ }
+ core = (struct cam_cdm *)cdm_hw->core_info;
+
+ if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+ int client_idx;
+ struct cam_cdm_bl_cb_request_entry *node =
+ (struct cam_cdm_bl_cb_request_entry *)data;
+
+ client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
+ client = core->clients[client_idx];
+ if ((!client) || (client->handle != node->client_hdl)) {
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
+ node->client_hdl);
+ return;
+ }
+ cam_cdm_get_client_refcount(client);
+ if (client->data.cam_cdm_callback) {
+ CAM_DBG(CAM_CDM, "Calling client=%s cb cookie=%d",
+ client->data.identifier, node->cookie);
+ client->data.cam_cdm_callback(node->client_hdl,
+ node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
+ node->cookie);
+ CAM_DBG(CAM_CDM, "Exit client cb cookie=%d",
+ node->cookie);
+ } else {
+ CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x",
+ node->client_hdl);
+ }
+ cam_cdm_put_client_refcount(client);
+ return;
+ }
+
+ for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+ if (core->clients[i] != NULL) {
+ client = core->clients[i];
+ mutex_lock(&client->lock);
+ CAM_DBG(CAM_CDM, "Found client slot %d", i);
+ if (client->data.cam_cdm_callback) {
+ if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
+ unsigned long iova =
+ (unsigned long)data;
+
+ client->data.cam_cdm_callback(
+ client->handle,
+ client->data.userdata,
+ CAM_CDM_CB_STATUS_PAGEFAULT,
+ (iova & 0xFFFFFFFF));
+ }
+ } else {
+ CAM_ERR(CAM_CDM,
+ "No cb registered for client hdl=%x",
+ client->handle);
+ }
+ mutex_unlock(&client->lock);
+ }
+ }
+}
+
+int cam_cdm_stream_ops_internal(void *hw_priv,
+ void *start_args, bool operation)
+{
+ struct cam_hw_info *cdm_hw = hw_priv;
+ struct cam_cdm *core = NULL;
+ int rc = -EPERM;
+ int client_idx;
+ struct cam_cdm_client *client;
+ uint32_t *handle = start_args;
+
+ if (!hw_priv)
+ return -EINVAL;
+
+ core = (struct cam_cdm *)cdm_hw->core_info;
+ client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+ client = core->clients[client_idx];
+ if (!client) {
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, *handle);
+ return -EINVAL;
+ }
+ cam_cdm_get_client_refcount(client);
+ if (*handle != client->handle) {
+ CAM_ERR(CAM_CDM, "client id given handle=%x invalid", *handle);
+ cam_cdm_put_client_refcount(client);
+ return -EINVAL;
+ }
+ if (operation == true) {
+ if (true == client->stream_on) {
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM client is already streamed ON");
+ cam_cdm_put_client_refcount(client);
+ return rc;
+ }
+ } else {
+ if (client->stream_on == false) {
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM client is already streamed Off");
+ cam_cdm_put_client_refcount(client);
+ return rc;
+ }
+ }
+
+ mutex_lock(&cdm_hw->hw_mutex);
+ if (operation == true) {
+ if (!cdm_hw->open_count) {
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+ rc = cam_cpas_start(core->cpas_handle,
+ &ahb_vote, &axi_vote);
+ if (rc != 0) {
+ CAM_ERR(CAM_CDM, "CPAS start failed");
+ goto end;
+ }
+ CAM_DBG(CAM_CDM, "CDM init first time");
+ if (core->id == CAM_CDM_VIRTUAL) {
+ CAM_DBG(CAM_CDM,
+ "Virtual CDM HW init first time");
+ rc = 0;
+ } else {
+ CAM_DBG(CAM_CDM, "CDM HW init first time");
+ rc = cam_hw_cdm_init(hw_priv, NULL, 0);
+ if (rc == 0) {
+ rc = cam_hw_cdm_alloc_genirq_mem(
+ hw_priv);
+ if (rc != 0) {
+ CAM_ERR(CAM_CDM,
+ "Genirqalloc failed");
+ cam_hw_cdm_deinit(hw_priv,
+ NULL, 0);
+ }
+ } else {
+ CAM_ERR(CAM_CDM, "CDM HW init failed");
+ }
+ }
+ if (rc == 0) {
+ cdm_hw->open_count++;
+ client->stream_on = true;
+ } else {
+ if (cam_cpas_stop(core->cpas_handle))
+ CAM_ERR(CAM_CDM, "CPAS stop failed");
+ }
+ } else {
+ cdm_hw->open_count++;
+ CAM_DBG(CAM_CDM, "CDM HW already ON count=%d",
+ cdm_hw->open_count);
+ rc = 0;
+ client->stream_on = true;
+ }
+ } else {
+ if (cdm_hw->open_count) {
+ cdm_hw->open_count--;
+ CAM_DBG(CAM_CDM, "stream OFF CDM %d",
+ cdm_hw->open_count);
+ if (!cdm_hw->open_count) {
+ CAM_DBG(CAM_CDM, "CDM Deinit now");
+ if (core->id == CAM_CDM_VIRTUAL) {
+ CAM_DBG(CAM_CDM,
+ "Virtual CDM HW Deinit");
+ rc = 0;
+ } else {
+ CAM_DBG(CAM_CDM, "CDM HW Deinit now");
+ rc = cam_hw_cdm_deinit(
+ hw_priv, NULL, 0);
+ if (cam_hw_cdm_release_genirq_mem(
+ hw_priv))
+ CAM_ERR(CAM_CDM,
+ "Genirq release fail");
+ }
+ if (rc) {
+ CAM_ERR(CAM_CDM,
+ "Deinit failed in streamoff");
+ } else {
+ client->stream_on = false;
+ rc = cam_cpas_stop(core->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_CDM,
+ "CPAS stop failed");
+ }
+ } else {
+ client->stream_on = false;
+ rc = 0;
+ CAM_DBG(CAM_CDM,
+ "Client stream off success =%d",
+ cdm_hw->open_count);
+ }
+ } else {
+ CAM_DBG(CAM_CDM, "stream OFF CDM Invalid %d",
+ cdm_hw->open_count);
+ rc = -ENXIO;
+ }
+ }
+end:
+ cam_cdm_put_client_refcount(client);
+ mutex_unlock(&cdm_hw->hw_mutex);
+ return rc;
+}
+
+int cam_cdm_stream_start(void *hw_priv,
+ void *start_args, uint32_t size)
+{
+ int rc = 0;
+
+ if (!hw_priv)
+ return -EINVAL;
+
+ rc = cam_cdm_stream_ops_internal(hw_priv, start_args, true);
+ return rc;
+
+}
+
+int cam_cdm_stream_stop(void *hw_priv,
+ void *start_args, uint32_t size)
+{
+ int rc = 0;
+
+ if (!hw_priv)
+ return -EINVAL;
+
+ rc = cam_cdm_stream_ops_internal(hw_priv, start_args, false);
+ return rc;
+
+}
+
+int cam_cdm_process_cmd(void *hw_priv,
+ uint32_t cmd, void *cmd_args, uint32_t arg_size)
+{
+ struct cam_hw_info *cdm_hw = hw_priv;
+ struct cam_hw_soc_info *soc_data = NULL;
+ struct cam_cdm *core = NULL;
+ int rc = -EINVAL;
+
+ if ((!hw_priv) || (!cmd_args) ||
+ (cmd >= CAM_CDM_HW_INTF_CMD_INVALID))
+ return rc;
+
+ soc_data = &cdm_hw->soc_info;
+ core = (struct cam_cdm *)cdm_hw->core_info;
+ switch (cmd) {
+ case CAM_CDM_HW_INTF_CMD_SUBMIT_BL: {
+ struct cam_cdm_hw_intf_cmd_submit_bl *req;
+ int idx;
+ struct cam_cdm_client *client;
+
+ if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
+ CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
+ arg_size);
+ break;
+ }
+ req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
+ if ((req->data->type < 0) ||
+ (req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
+ CAM_ERR(CAM_CDM, "Invalid req bl cmd addr type=%d",
+ req->data->type);
+ break;
+ }
+ idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
+ client = core->clients[idx];
+ if ((!client) || (req->handle != client->handle)) {
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
+ req->handle);
+ break;
+ }
+ cam_cdm_get_client_refcount(client);
+ if ((req->data->flag == true) &&
+ (!client->data.cam_cdm_callback)) {
+ CAM_ERR(CAM_CDM,
+ "CDM request cb without registering cb");
+ cam_cdm_put_client_refcount(client);
+ break;
+ }
+ if (client->stream_on != true) {
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM needs to be streamed ON first");
+ cam_cdm_put_client_refcount(client);
+ break;
+ }
+ if (core->id == CAM_CDM_VIRTUAL)
+ rc = cam_virtual_cdm_submit_bl(cdm_hw, req, client);
+ else
+ rc = cam_hw_cdm_submit_bl(cdm_hw, req, client);
+
+ cam_cdm_put_client_refcount(client);
+ break;
+ }
+ case CAM_CDM_HW_INTF_CMD_ACQUIRE: {
+ struct cam_cdm_acquire_data *data;
+ int idx;
+ struct cam_cdm_client *client;
+
+ if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
+ CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
+ arg_size);
+ break;
+ }
+
+ mutex_lock(&cdm_hw->hw_mutex);
+ data = (struct cam_cdm_acquire_data *)cmd_args;
+ CAM_DBG(CAM_CDM, "Trying to acquire client=%s in hw idx=%d",
+ data->identifier, core->index);
+ idx = cam_cdm_find_free_client_slot(core);
+ if ((idx < 0) || (core->clients[idx])) {
+ mutex_unlock(&cdm_hw->hw_mutex);
+ CAM_ERR(CAM_CDM,
+ "Fail to client slots, client=%s in hw idx=%d",
+ data->identifier, core->index);
+ break;
+ }
+ core->clients[idx] = kzalloc(sizeof(struct cam_cdm_client),
+ GFP_KERNEL);
+ if (!core->clients[idx]) {
+ mutex_unlock(&cdm_hw->hw_mutex);
+ rc = -ENOMEM;
+ break;
+ }
+
+ mutex_unlock(&cdm_hw->hw_mutex);
+ client = core->clients[idx];
+ mutex_init(&client->lock);
+ data->ops = core->ops;
+ if (core->id == CAM_CDM_VIRTUAL) {
+ data->cdm_version.major = 1;
+ data->cdm_version.minor = 0;
+ data->cdm_version.incr = 0;
+ data->cdm_version.reserved = 0;
+ data->ops = cam_cdm_get_ops(0,
+ &data->cdm_version, true);
+ if (!data->ops) {
+ mutex_destroy(&client->lock);
+ mutex_lock(&cdm_hw->hw_mutex);
+ kfree(core->clients[idx]);
+ core->clients[idx] = NULL;
+ mutex_unlock(
+ &cdm_hw->hw_mutex);
+ rc = -EPERM;
+ CAM_ERR(CAM_CDM, "Invalid ops for virtual cdm");
+ break;
+ }
+ } else {
+ data->cdm_version = core->version;
+ }
+
+ cam_cdm_get_client_refcount(client);
+ mutex_lock(&client->lock);
+ memcpy(&client->data, data,
+ sizeof(struct cam_cdm_acquire_data));
+ client->handle = CAM_CDM_CREATE_CLIENT_HANDLE(
+ core->index,
+ idx);
+ client->stream_on = false;
+ data->handle = client->handle;
+ CAM_DBG(CAM_CDM, "Acquired client=%s in hwidx=%d",
+ data->identifier, core->index);
+ mutex_unlock(&client->lock);
+ rc = 0;
+ break;
+ }
+ case CAM_CDM_HW_INTF_CMD_RELEASE: {
+ uint32_t *handle = cmd_args;
+ int idx;
+ struct cam_cdm_client *client;
+
+ if (sizeof(uint32_t) != arg_size) {
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM cmd %d size=%x for handle=%x",
+ cmd, arg_size, *handle);
+ return -EINVAL;
+ }
+ idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+ mutex_lock(&cdm_hw->hw_mutex);
+ client = core->clients[idx];
+ if ((!client) || (*handle != client->handle)) {
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x",
+ client, *handle);
+ mutex_unlock(&cdm_hw->hw_mutex);
+ break;
+ }
+ cam_cdm_put_client_refcount(client);
+ mutex_lock(&client->lock);
+ if (client->refcount != 0) {
+ CAM_ERR(CAM_CDM, "CDM Client refcount not zero %d",
+ client->refcount);
+ rc = -EPERM;
+ mutex_unlock(&client->lock);
+ mutex_unlock(&cdm_hw->hw_mutex);
+ break;
+ }
+ core->clients[idx] = NULL;
+ mutex_unlock(&client->lock);
+ mutex_destroy(&client->lock);
+ kfree(client);
+ mutex_unlock(&cdm_hw->hw_mutex);
+ rc = 0;
+ break;
+ }
+ case CAM_CDM_HW_INTF_CMD_RESET_HW: {
+ CAM_ERR(CAM_CDM, "CDM HW reset not supported for handle =%x",
+ *((uint32_t *)cmd_args));
+ break;
+ }
+ default:
+ CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd);
+ break;
+ }
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_core_common.h
new file mode 100644
index 000000000000..3c9e097023d3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_core_common.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_CORE_COMMON_H_
+#define _CAM_CDM_CORE_COMMON_H_
+
+#include "cam_mem_mgr.h"
+
+#define CAM_CDM170_VERSION 0x10000000
+#define CAM_CDM175_VERSION 0x10010000
+
+extern struct cam_cdm_utils_ops CDM170_ops;
+
+int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_deinit(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv);
+int cam_hw_cdm_release_genirq_mem(void *hw_priv);
+int cam_cdm_get_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size);
+int cam_cdm_stream_ops_internal(void *hw_priv, void *start_args,
+ bool operation);
+int cam_cdm_stream_start(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_stream_stop(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args,
+ uint32_t arg_size);
+bool cam_cdm_set_cam_hw_version(
+ uint32_t ver, struct cam_hw_version *cam_version);
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data);
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+ uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+ struct cam_cdm_hw_intf_cmd_submit_bl *req,
+ struct cam_cdm_client *client);
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+ struct cam_cdm_hw_intf_cmd_submit_bl *req,
+ struct cam_cdm_client *client);
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+ uint32_t tag, struct list_head *bl_list);
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+ enum cam_cdm_cb_status status, void *data);
+
+#endif /* _CAM_CDM_CORE_COMMON_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_hw_core.c
new file mode 100644
index 000000000000..13a653ae0399
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_hw_core.c
@@ -0,0 +1,1146 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <media/cam_req_mgr.h>
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+#include "cam_hw_cdm170_reg.h"
+
+/* DT compatible strings for the CDM instances this driver knows about */
+#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0"
+#define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm"
+#define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm"
+#define CAM_HW_CDM_BPS_NAME "qcom,cam170-bps-cdm"
+
+/* Max time (ms) to wait for a BL-done IRQ before giving up on FIFO space */
+#define CAM_CDM_BL_FIFO_WAIT_TIMEOUT 2000
+
+/* Bottom half for the CDM IRQ; queued from cam_hw_cdm_irq() */
+static void cam_hw_cdm_work(struct work_struct *work);
+
+/* DT match table entry for all CDM variants*/
+static const struct of_device_id msm_cam_hw_cdm_dt_match[] = {
+	{
+		.compatible = CAM_HW_CDM_CPAS_0_NAME,
+		.data = &cam170_cpas_cdm_offset_table,
+	},
+	{}
+};
+
+/* Translate a DT compatible string into the CDM id enum; returns
+ * CAM_CDM_MAX when the name is not a recognized CDM instance.
+ */
+static enum cam_cdm_id cam_hw_cdm_get_id_by_name(char *name)
+{
+	enum cam_cdm_id id = CAM_CDM_MAX;
+
+	if (strcmp(name, CAM_HW_CDM_CPAS_0_NAME) == 0)
+		id = CAM_CDM_CPAS_0;
+
+	return id;
+}
+
+/* Read back the number of BLs still pending in the HW FIFO into
+ * *pending_bl; returns 0 on success, -EIO on register read failure.
+ */
+int cam_hw_cdm_bl_fifo_pending_bl_rb(struct cam_hw_info *cdm_hw,
+	uint32_t *pending_bl)
+{
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+			pending_bl) == 0)
+		return 0;
+
+	CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
+	return -EIO;
+}
+
+/* Enable or disable the BL-done interrupt and keep core->bl_done in step
+ * with it; the IRQ worker only signals bl_complete while bl_done is
+ * non-zero. Returns 0 on success, -EIO on any register access failure.
+ */
+static int cam_hw_cdm_enable_bl_done_irq(struct cam_hw_info *cdm_hw,
+	bool enable)
+{
+	int rc = -EIO;
+	uint32_t irq_mask = 0;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	/* Read-modify-write the IRQ mask so the other enabled bits are
+	 * preserved.
+	 */
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_MASK,
+			&irq_mask)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM IRQ mask");
+		return rc;
+	}
+
+	if (enable == true) {
+		/* 0x4 presumably is the BL-done bit (matches the
+		 * CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK handling in the
+		 * worker) — TODO confirm against the register spec.
+		 */
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
+				(irq_mask | 0x4))) {
+			CAM_ERR(CAM_CDM, "Write failed to enable BL done irq");
+		} else {
+			atomic_inc(&core->bl_done);
+			rc = 0;
+			CAM_DBG(CAM_CDM, "BL done irq enabled =%d",
+				atomic_read(&core->bl_done));
+		}
+	} else {
+		/* Restrict the mask back to 0x70003, the default programmed
+		 * by cam_hw_cdm_init(), which clears the BL-done bit.
+		 */
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
+				(irq_mask & 0x70003))) {
+			CAM_ERR(CAM_CDM, "Write failed to disable BL done irq");
+		} else {
+			atomic_dec(&core->bl_done);
+			rc = 0;
+			CAM_DBG(CAM_CDM, "BL done irq disable =%d",
+				atomic_read(&core->bl_done));
+		}
+	}
+	return rc;
+}
+
+/* Enable (0x01) or disable (0x02) the CDM core; the disable value is
+ * also used to pause the core while debug registers are dumped.
+ * Returns 0 on success, -EIO when the register write fails.
+ */
+static int cam_hw_cdm_enable_core(struct cam_hw_info *cdm_hw, bool enable)
+{
+	uint32_t core_val = enable ? 0x01 : 0x02;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, core_val) == 0)
+		return 0;
+
+	if (enable)
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW core enable");
+	else
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW core disable");
+	return -EIO;
+}
+
+/* Turn on debug capture: 0x10100 sets the AHB (bit 8) and BL-FIFO
+ * (bit 16) capture bits consumed by the debug-dump path.
+ */
+int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100) == 0)
+		return 0;
+
+	CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
+	return -EIO;
+}
+
+/* Clear all debug-capture bits in the core debug register. */
+int cam_hw_cdm_disable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0) == 0)
+		return 0;
+
+	CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
+	return -EIO;
+}
+
+/* Dump the core-enable register and all eight scratch registers at
+ * error level. Read failures are deliberately ignored; a failed read
+ * would simply reprint the previous dump_reg value.
+ */
+void cam_hw_cdm_dump_scratch_registors(struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg = 0;
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump core en=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch0=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch1=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch2=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch3=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch4=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch5=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch6=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch7=%x", dump_reg);
+
+}
+
+/* Pause the CDM core, dump its debug state (AHB trace and BL FIFO when
+ * capture was enabled via cam_hw_cdm_enable_core_dbg()), then resume
+ * the core. Takes hw_mutex for the full pause/dump/resume sequence.
+ */
+void cam_hw_cdm_dump_core_debug_registers(
+	struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg, core_dbg, loop_cnt;
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW core status=%x", dump_reg);
+	/* First pause CDM, If it fails still proceed to dump debug info */
+	cam_hw_cdm_enable_core(cdm_hw, false);
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
+	/* Pending-BL count doubles as the number of FIFO entries to read
+	 * back below.
+	 */
+	loop_cnt = dump_reg;
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW Debug status reg=%x", dump_reg);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg);
+	/* Bit 8 of core_dbg gates AHB trace capture */
+	if (core_dbg & 0x100) {
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg);
+		CAM_ERR(CAM_CDM, "AHB dump reglastaddr=%x", dump_reg);
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg);
+		CAM_ERR(CAM_CDM, "AHB dump reglastdata=%x", dump_reg);
+	} else {
+		CAM_ERR(CAM_CDM, "CDM HW AHB dump not enable");
+	}
+
+	/* Bit 16 of core_dbg gates BL FIFO readback */
+	if (core_dbg & 0x10000) {
+		int i;
+
+		CAM_ERR(CAM_CDM, "CDM HW BL FIFO dump with loop count=%d",
+			loop_cnt);
+		for (i = 0 ; i < loop_cnt ; i++) {
+			/* Select FIFO entry i, then read its base and len */
+			cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB,
+				&dump_reg);
+			CAM_ERR(CAM_CDM, "BL(%d) base addr =%x", i, dump_reg);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB,
+				&dump_reg);
+			CAM_ERR(CAM_CDM, "BL(%d) len=%d tag=%d", i,
+				(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
+		}
+	} else {
+		CAM_ERR(CAM_CDM, "CDM HW BL FIFO readback not enable");
+	}
+
+	CAM_ERR(CAM_CDM, "CDM HW default dump");
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW core cfg=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW irq status=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW irq set reg=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current BL base=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current BL len=%d tag=%d",
+		(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current AHB base=%x", dump_reg);
+
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
+
+	/* Enable CDM back */
+	cam_hw_cdm_enable_core(cdm_hw, true);
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+}
+
+/* Wait until the HW BL FIFO has room for at least one of the bl_count
+ * requested BLs. On success the non-negative return value is a write
+ * budget for the caller (bl_count when everything fits, otherwise the
+ * shortfall vs. the free slots) — see write_count handling in
+ * cam_hw_cdm_submit_bl(). Returns -EIO on register or timeout failure.
+ * One slot is always kept in reserve ("available - 1"), presumably for
+ * the trailing gen-irq BL — TODO confirm.
+ */
+int cam_hw_cdm_wait_for_bl_fifo(struct cam_hw_info *cdm_hw,
+	uint32_t bl_count)
+{
+	uint32_t pending_bl = 0;
+	int32_t available_bl_slots = 0;
+	int rc = -EIO;
+	long time_left;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	do {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+			&pending_bl)) {
+			CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
+			rc = -EIO;
+			break;
+		}
+		available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl;
+		if (available_bl_slots < 0) {
+			CAM_ERR(CAM_CDM, "Invalid available slots %d:%d:%d",
+				available_bl_slots, CAM_CDM_HWFIFO_SIZE,
+				pending_bl);
+			break;
+		}
+		if (bl_count < (available_bl_slots - 1)) {
+			CAM_DBG(CAM_CDM,
+				"BL slot available_cnt=%d requested=%d",
+				(available_bl_slots - 1), bl_count);
+			rc = bl_count;
+			break;
+		} else if (0 == (available_bl_slots - 1)) {
+			/* FIFO effectively full: arm the BL-done IRQ and
+			 * block until HW drains an entry or we time out.
+			 */
+			rc = cam_hw_cdm_enable_bl_done_irq(cdm_hw, true);
+			if (rc) {
+				CAM_ERR(CAM_CDM, "Enable BL done irq failed");
+				break;
+			}
+			time_left = wait_for_completion_timeout(
+				&core->bl_complete, msecs_to_jiffies(
+				CAM_CDM_BL_FIFO_WAIT_TIMEOUT));
+			if (time_left <= 0) {
+				CAM_ERR(CAM_CDM,
+					"CDM HW BL Wait timed out failed");
+				if (cam_hw_cdm_enable_bl_done_irq(cdm_hw,
+						false))
+					CAM_ERR(CAM_CDM,
+						"Disable BL done irq failed");
+				rc = -EIO;
+				break;
+			}
+			/* Drained: disarm the IRQ and loop to re-read the
+			 * FIFO depth before deciding the budget.
+			 */
+			if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, false))
+				CAM_ERR(CAM_CDM, "Disable BL done irq failed");
+			rc = 0;
+			CAM_DBG(CAM_CDM, "CDM HW is ready for data");
+		} else {
+			rc = (bl_count - (available_bl_slots - 1));
+			break;
+		}
+	} while (1);
+
+	return rc;
+}
+
+/* Program one BL entry (base address plus packed len/tag) into the HW
+ * FIFO staging registers. Returns true on failure, false on success;
+ * the entry is latched only by cam_hw_cdm_commit_bl_write().
+ */
+bool cam_hw_cdm_bl_write(struct cam_hw_info *cdm_hw, uint32_t src,
+	uint32_t len, uint32_t tag)
+{
+	/* len occupies bits [19:0], the 8-bit tag bits [27:20] */
+	uint32_t len_and_tag = (len & 0xFFFFF) | ((tag & 0xFF) << 20);
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) {
+		CAM_ERR(CAM_CDM, "Failed to write CDM base to BL base");
+		return true;
+	}
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG, len_and_tag)) {
+		CAM_ERR(CAM_CDM, "Failed to write CDM BL len");
+		return true;
+	}
+
+	return false;
+}
+
+/* Commit the staged BL base/len registers into the HW FIFO.
+ * Returns true on write failure, false on success.
+ */
+bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw)
+{
+	bool failed = false;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) {
+		CAM_ERR(CAM_CDM, "Failed to write CDM commit BL");
+		failed = true;
+	}
+
+	return failed;
+}
+
+/* Append a gen-irq BL for the current tag so HW raises an inline IRQ
+ * when the client's preceding BLs complete. The callback node is queued
+ * on bl_request_list BEFORE the HW write so the IRQ worker can match the
+ * tag; it is unlinked and freed again on any failure below.
+ */
+int cam_hw_cdm_submit_gen_irq(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t len;
+	int rc;
+
+	/* Reject tags outside the 0..63 window supported here */
+	if (core->bl_tag > 63) {
+		CAM_ERR(CAM_CDM, "bl_tag invalid =%d", core->bl_tag);
+		rc = -EINVAL;
+		goto end;
+	}
+	CAM_DBG(CAM_CDM, "CDM write BL last cmd tag=%x total=%d cookie=%d",
+		core->bl_tag, req->data->cmd_arrary_count, req->data->cookie);
+	node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry),
+		GFP_KERNEL);
+	if (!node) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+	node->client_hdl = req->handle;
+	node->cookie = req->data->cookie;
+	node->bl_tag = core->bl_tag;
+	node->userdata = req->data->userdata;
+	list_add_tail(&node->entry, &core->bl_request_list);
+	/* 'len' is an offset in 32-bit words into the gen-irq buffer;
+	 * the HW iova below therefore advances by 4*len bytes.
+	 */
+	len = core->ops->cdm_required_size_genirq() * core->bl_tag;
+	core->ops->cdm_write_genirq(((uint32_t *)core->gen_irq.kmdvaddr + len),
+		core->bl_tag);
+	rc = cam_hw_cdm_bl_write(cdm_hw, (core->gen_irq.vaddr + (4*len)),
+		((4 * core->ops->cdm_required_size_genirq()) - 1),
+		core->bl_tag);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CDM hw bl write failed for gen irq bltag=%d",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -EIO;
+		goto end;
+	}
+
+	if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+		CAM_ERR(CAM_CDM, "Cannot commit the genirq BL with tag tag=%d",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -EIO;
+	}
+
+end:
+	return rc;
+}
+
+/**
+ * cam_hw_cdm_submit_bl() - Write a client's BL array into the CDM HW FIFO
+ * @cdm_hw:  CDM hw device info
+ * @req:     submit request carrying the BL array and its metadata
+ * @client:  submitting client; its lock serializes the submission
+ *
+ * Validates every BL, waits for FIFO space as needed, writes and commits
+ * each entry, and (when req->data->flag is set) appends a gen-irq BL
+ * after the last entry. Returns 0 on success or a negative error code.
+ */
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t pending_bl = 0;
+	int write_count = 0;
+
+	if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) {
+		pr_info("requested BL more than max size, cnt=%d max=%d",
+			req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE);
+	}
+
+	/* Refuse new work while the core is in an error state */
+	if (atomic_read(&core->error))
+		return -EIO;
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	mutex_lock(&client->lock);
+	rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Cannot read the current BL depth");
+		mutex_unlock(&client->lock);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		return rc;
+	}
+
+	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
+		uint64_t hw_vaddr_ptr = 0;
+		size_t len = 0;
+
+		/* BUGFIX: this used "&&", which can never be true (a length
+		 * cannot be both zero and > 1MB), so invalid lengths slipped
+		 * through. Reject a BL whose length is zero OR too large.
+		 */
+		if ((!cdm_cmd->cmd[i].len) ||
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			CAM_ERR(CAM_CDM,
+				"cmd len(%d) is invalid cnt=%d total cnt=%d",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -EINVAL;
+			break;
+		}
+		if (atomic_read(&core->error)) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"In error state cnt=%d total cnt=%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -EIO;
+			break;
+		}
+		/* Replenish the write budget only when it runs out */
+		if (write_count == 0) {
+			write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw,
+				(req->data->cmd_arrary_count - i));
+			if (write_count < 0) {
+				CAM_ERR(CAM_CDM,
+					"wait for bl fifo failed %d:%d",
+					i, req->data->cmd_arrary_count);
+				rc = -EIO;
+				break;
+			}
+		} else {
+			write_count--;
+		}
+
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_io_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				core->iommu_hdl.non_secure, &hw_vaddr_ptr,
+				&len);
+		} else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
+			if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
+				CAM_ERR(CAM_CDM,
+					"Hw bl hw_iova is invalid %d:%d",
+					i, req->data->cmd_arrary_count);
+				rc = -EINVAL;
+				break;
+			}
+			rc = 0;
+			hw_vaddr_ptr =
+				(uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
+			len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
+		} else {
+			CAM_ERR(CAM_CDM,
+				"Only mem hdl/hw va type is supported %d",
+				req->data->type);
+			rc = -EINVAL;
+			break;
+		}
+
+		if ((!rc) && (hw_vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			CAM_DBG(CAM_CDM, "Got the HW VA");
+			/* Wrap the tag within the HW FIFO window */
+			if (core->bl_tag >=
+				(CAM_CDM_HWFIFO_SIZE - 1))
+				core->bl_tag = 0;
+			rc = cam_hw_cdm_bl_write(cdm_hw,
+				((uint32_t)hw_vaddr_ptr +
+					cdm_cmd->cmd[i].offset),
+				(cdm_cmd->cmd[i].len - 1), core->bl_tag);
+			if (rc) {
+				CAM_ERR(CAM_CDM, "Hw bl write failed %d:%d",
+					i, req->data->cmd_arrary_count);
+				rc = -EIO;
+				break;
+			}
+		} else {
+			CAM_ERR(CAM_CDM,
+				"Sanity check failed for hdl=%x len=%zu:%d",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			CAM_ERR(CAM_CDM, "Sanity check failed for %d:%d",
+				i, req->data->cmd_arrary_count);
+			rc = -EINVAL;
+			break;
+		}
+
+		if (!rc) {
+			CAM_DBG(CAM_CDM,
+				"write BL success for cnt=%d with tag=%d",
+				i, core->bl_tag);
+
+			CAM_DBG(CAM_CDM, "Now commit the BL");
+			if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+				CAM_ERR(CAM_CDM,
+					"Cannot commit the BL %d tag=%d",
+					i, core->bl_tag);
+				rc = -EIO;
+				break;
+			}
+			CAM_DBG(CAM_CDM, "BL commit success BL %d tag=%d", i,
+				core->bl_tag);
+			core->bl_tag++;
+			/* Trailing gen-irq BL requested: submit it after the
+			 * final entry so the client gets a completion IRQ.
+			 */
+			if ((req->data->flag == true) &&
+				(i == (req->data->cmd_arrary_count -
+				1))) {
+				rc = cam_hw_cdm_submit_gen_irq(
+					cdm_hw, req);
+				if (rc == 0)
+					core->bl_tag++;
+			}
+		}
+	}
+	mutex_unlock(&client->lock);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+
+}
+
+/* Bottom half for the CDM IRQ. Dispatches on the status bits captured by
+ * cam_hw_cdm_irq(): inline-IRQ client callbacks, reset-done and BL-done
+ * completions, and the three error conditions. Frees the payload when
+ * done.
+ */
+static void cam_hw_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+
+		CAM_DBG(CAM_CDM, "IRQ status=%x", payload->irq_status);
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			struct cam_cdm_bl_cb_request_entry *node;
+
+			CAM_DBG(CAM_CDM, "inline IRQ data=%x",
+				payload->irq_data);
+			mutex_lock(&cdm_hw->hw_mutex);
+			/* irq_data carries the BL tag of the completed
+			 * gen-irq; match it back to the waiting client.
+			 */
+			node = cam_cdm_find_request_by_bl_tag(
+				payload->irq_data,
+				&core->bl_request_list);
+			if (node) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+					CAM_HW_CDM_BL_CB_INTERNAL) {
+					CAM_ERR(CAM_CDM,
+						"Invalid node=%pK %d", node,
+						node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
+			} else {
+				CAM_ERR(CAM_CDM,
+					"Inval node, inline_irq st=%x data=%x",
+					payload->irq_status, payload->irq_data);
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) {
+			CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
+			complete(&core->reset_complete);
+		}
+		/* Only signal bl_complete when a waiter armed bl_done via
+		 * cam_hw_cdm_enable_bl_done_irq().
+		 */
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) {
+			if (atomic_read(&core->bl_done)) {
+				CAM_DBG(CAM_CDM, "CDM HW BL done IRQ");
+				complete(&core->bl_complete);
+			}
+		}
+		/* Invalid-command leaves core->error raised: submissions are
+		 * rejected until a HW reset clears it in cam_hw_cdm_init().
+		 */
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Invalid command IRQ, Need HW reset\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		}
+		/* AHB/overflow errors raise core->error only for the span of
+		 * the debug dump, then clear it again.
+		 */
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "AHB Error IRQ\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+			atomic_dec(&core->error);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "Overflow Error IRQ\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+			atomic_dec(&core->error);
+		}
+		kfree(payload);
+	} else {
+		CAM_ERR(CAM_CDM, "NULL payload");
+	}
+
+}
+
+/* SMMU page-fault callback registered in probe; token is the cam_hw_info
+ * of the faulting CDM. Dumps debug state and notifies all clients of the
+ * page fault.
+ */
+static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token,
+	uint32_t buf_info)
+{
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	if (!token) {
+		CAM_ERR(CAM_CDM, "Invalid token");
+		return;
+	}
+
+	cdm_hw = (struct cam_hw_info *)token;
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	/* Keep the core flagged as errored for the duration of the dump
+	 * and client notification, then clear the flag again.
+	 */
+	atomic_inc(&core->error);
+	cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+	CAM_ERR_RATE_LIMIT(CAM_CDM, "Page fault iova addr %pK\n",
+		(void *)iova);
+	cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
+		(void *)iova);
+	atomic_dec(&core->error);
+}
+
+/* Hard IRQ handler: snapshots IRQ status/data into a GFP_ATOMIC payload,
+ * acks the interrupt, and defers all real processing to cam_hw_cdm_work()
+ * on the core workqueue. The payload is freed by the worker, or here if
+ * queueing fails.
+ */
+irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cdm_hw = data;
+	struct cam_cdm *cdm_core = cdm_hw->core_info;
+	struct cam_cdm_work_payload *payload;
+	bool work_status;
+
+	CAM_DBG(CAM_CDM, "Got irq");
+	payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC);
+	if (payload) {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS,
+				&payload->irq_status)) {
+			CAM_ERR(CAM_CDM, "Failed to read CDM HW IRQ status");
+		}
+		/* Spurious interrupt: nothing to hand to the worker */
+		if (!payload->irq_status) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid irq received\n");
+			kfree(payload);
+			return IRQ_HANDLED;
+		}
+		/* The inline IRQ carries the completed BL tag in USR_DATA */
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA,
+				&payload->irq_data)) {
+				CAM_ERR(CAM_CDM,
+					"Failed to read CDM HW IRQ data");
+			}
+		}
+		CAM_DBG(CAM_CDM, "Got payload=%d", payload->irq_status);
+		payload->hw = cdm_hw;
+		INIT_WORK((struct work_struct *)&payload->work,
+			cam_hw_cdm_work);
+		/* Ack before queueing so a new IRQ can latch immediately */
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR,
+			payload->irq_status))
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ Clear");
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01))
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ cmd");
+		work_status = queue_work(cdm_core->work_queue, &payload->work);
+		if (work_status == false) {
+			CAM_ERR(CAM_CDM, "Failed to queue work for irq=%x",
+				payload->irq_status);
+			kfree(payload);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Allocate the shared gen-irq command buffer (8 bytes per HW FIFO slot)
+ * through the cam mem manager and cache its handle/iova/kva/size in the
+ * core. Returns 0 on success, -EINVAL for a NULL hw_priv, or the mem
+ * manager's error code.
+ */
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_mem_mgr_request_desc genirq_alloc_cmd;
+	struct cam_mem_mgr_memory_desc genirq_alloc_out;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_alloc_cmd.align = 0;
+	/* One gen-irq command slot per possible in-flight BL */
+	genirq_alloc_cmd.size = (8 * CAM_CDM_HWFIFO_SIZE);
+	genirq_alloc_cmd.smmu_hdl = cdm_core->iommu_hdl.non_secure;
+	genirq_alloc_cmd.flags = CAM_MEM_FLAG_HW_READ_WRITE;
+	rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd,
+		&genirq_alloc_out);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to get genirq cmd space rc=%d", rc);
+		goto end;
+	}
+	cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle;
+	/* Only the lower 32 bits of the iova are programmed into the HW */
+	cdm_core->gen_irq.vaddr = (genirq_alloc_out.iova & 0xFFFFFFFF);
+	cdm_core->gen_irq.kmdvaddr = genirq_alloc_out.kva;
+	cdm_core->gen_irq.size = genirq_alloc_out.len;
+
+end:
+	return rc;
+}
+
+/* Return the gen-irq command buffer to the cam mem manager.
+ * Returns -EINVAL for a NULL hw_priv, else the mem manager's result.
+ */
+int cam_hw_cdm_release_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_mem_mgr_memory_desc genirq_release_cmd;
+	struct cam_cdm *cdm_core;
+	int rc;
+
+	if (!cdm_hw)
+		return -EINVAL;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle;
+	rc = cam_mem_mgr_release_mem(&genirq_release_cmd);
+	if (rc)
+		CAM_ERR(CAM_CDM, "Failed to put genirq cmd space for hw");
+
+	return rc;
+}
+
+/**
+ * cam_hw_cdm_init() - Power up the CDM core and reset it
+ * @hw_priv:      cam_hw_info of the CDM instance
+ * @init_hw_args: unused
+ * @arg_size:     unused
+ *
+ * Enables the platform resources, clears error/completion state,
+ * programs the default IRQ mask (0x70003), issues a HW reset and waits
+ * for the reset-done IRQ. Returns 0 on success, -EINVAL for a NULL
+ * hw_priv, -EIO on register or timeout failure (resources are disabled
+ * again on that path).
+ */
+int cam_hw_cdm_init(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc;
+	long time_left;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Enable platform failed");
+		goto end;
+	}
+
+	CAM_DBG(CAM_CDM, "Enable soc done");
+
+	/* Before triggering the reset to HW, clear the reset complete */
+	atomic_set(&cdm_core->error, 0);
+	atomic_set(&cdm_core->bl_done, 0);
+	reinit_completion(&cdm_core->reset_complete);
+	reinit_completion(&cdm_core->bl_complete);
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) {
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask");
+		goto disable_return;
+	}
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) {
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset");
+		goto disable_return;
+	}
+
+	CAM_DBG(CAM_CDM, "Waiting for CDM HW resetdone");
+	time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
+		msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
+
+	if (time_left <= 0) {
+		/* BUGFIX: the old message printed "rc=%d" while rc was still
+		 * 0 from the successful enable above; report the timeout
+		 * instead of a stale, misleading return code.
+		 */
+		CAM_ERR(CAM_CDM, "CDM HW reset Wait failed, timeout=%d ms",
+			CAM_CDM_HW_RESET_TIMEOUT);
+		goto disable_return;
+	}
+
+	CAM_DBG(CAM_CDM, "CDM Init success");
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	/* Re-program the default mask after reset */
+	cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003);
+	rc = 0;
+	goto end;
+
+disable_return:
+	rc = -EIO;
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
+end:
+	return rc;
+}
+
+/* Power down the CDM core by releasing its platform resources and, on
+ * success, marking the hw state POWER_DOWN. Returns 0 on success,
+ * -EINVAL for a NULL hw_priv, else the disable call's error code.
+ */
+int cam_hw_cdm_deinit(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_cdm *cdm_core;
+	int rc;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = cdm_hw->core_info;
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "disable platform failed");
+		return rc;
+	}
+
+	CAM_DBG(CAM_CDM, "CDM Deinit success");
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	return 0;
+}
+
+/**
+ * cam_hw_cdm_probe() - Platform probe for a HW CDM instance
+ * @pdev: platform device matched via msm_cam_hw_cdm_dt_match
+ *
+ * Allocates the hw/intf/core structures, parses DT, attaches the SMMU,
+ * registers with CPAS, briefly powers the core to read its version,
+ * then registers the instance with the CDM interface manager.
+ *
+ * Fixes vs. the original: the alloc_workqueue() result is now checked
+ * (a NULL queue would crash later in cam_hw_cdm_irq()), and the
+ * workqueue is destroyed when platform-resource request fails instead
+ * of being leaked.
+ */
+int cam_hw_cdm_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	struct cam_cpas_register_params cpas_parms;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw->soc_info.dev = &pdev->dev;
+	cdm_hw->soc_info.dev_name = pdev->name;
+	cdm_hw_intf->hw_type = CAM_HW_CDM;
+	cdm_hw->open_count = 0;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+
+	rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to get dt properties");
+		goto release_mem;
+	}
+	cdm_hw_intf->hw_idx = cdm_hw->soc_info.index;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+		cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name);
+	if (cdm_core->id >= CAM_CDM_MAX) {
+		CAM_ERR(CAM_CDM, "Failed to get CDM HW name for %s",
+			cdm_core->name);
+		goto release_private_mem;
+	}
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	init_completion(&cdm_core->bl_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = cam_hw_cdm_init;
+	cdm_hw_intf->hw_ops.deinit = cam_hw_cdm_deinit;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+	mutex_lock(&cdm_hw->hw_mutex);
+
+	CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure);
+	if (rc < 0) {
+		CAM_ERR(CAM_CDM, "cpas-cdm get iommu handle failed");
+		goto unlock_release_mem;
+	}
+	cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		cam_hw_cdm_iommu_fault_handler, cdm_hw);
+
+	rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
+	if (rc < 0) {
+		CAM_ERR(CAM_CDM, "Attach iommu non secure handle failed");
+		goto destroy_non_secure_hdl;
+	}
+	cdm_core->iommu_hdl.secure = -1;
+
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+	/* BUGFIX: the allocation result was never checked; a NULL queue
+	 * would only surface later as a crash in the IRQ path.
+	 */
+	if (!cdm_core->work_queue) {
+		CAM_ERR(CAM_CDM, "Failed to allocate the CDM workqueue");
+		rc = -ENOMEM;
+		goto destroy_non_secure_hdl;
+	}
+
+	rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info,
+		cam_hw_cdm_irq, cdm_hw);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to request platform resource");
+		/* BUGFIX: was destroy_non_secure_hdl, leaking the queue */
+		goto destroy_wq;
+	}
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
+		goto release_platform_resource;
+	}
+	CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	rc = cam_cpas_start(cdm_core->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS start failed");
+		goto cpas_unregister;
+	}
+
+	/* Power the core briefly just to read its version registers */
+	rc = cam_hw_cdm_init(cdm_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to Init CDM HW");
+		goto cpas_stop;
+	}
+	cdm_hw->open_count++;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+		&cdm_core->hw_version)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM HW Version");
+		goto deinit;
+	}
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_TITAN_VERSION,
+		&cdm_core->hw_family_version)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM family Version");
+		goto deinit;
+	}
+
+	CAM_DBG(CAM_CDM, "CDM Hw version read success family =%x hw =%x",
+		cdm_core->hw_family_version, cdm_core->hw_version);
+	cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL,
+		false);
+	if (!cdm_core->ops) {
+		CAM_ERR(CAM_CDM, "Failed to util ops for hw");
+		goto deinit;
+	}
+
+	if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version,
+		&cdm_core->version)) {
+		CAM_ERR(CAM_CDM, "Failed to set cam he version for hw");
+		goto deinit;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to Deinit CDM HW");
+		cdm_hw->open_count--;
+		goto cpas_stop;
+	}
+
+	rc = cam_cpas_stop(cdm_core->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS stop failed");
+		cdm_hw->open_count--;
+		goto cpas_unregister;
+	}
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+		soc_private, CAM_HW_CDM, &cdm_core->index);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "HW CDM Interface registration failed");
+		cdm_hw->open_count--;
+		goto cpas_unregister;
+	}
+	cdm_hw->open_count--;
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
+
+	return rc;
+
+deinit:
+	if (cam_hw_cdm_deinit(cdm_hw, NULL, 0))
+		CAM_ERR(CAM_CDM, "Deinit failed for hw");
+	cdm_hw->open_count--;
+cpas_stop:
+	if (cam_cpas_stop(cdm_core->cpas_handle))
+		CAM_ERR(CAM_CDM, "CPAS stop failed");
+cpas_unregister:
+	if (cam_cpas_unregister_client(cdm_core->cpas_handle))
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
+release_platform_resource:
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		CAM_ERR(CAM_CDM, "Release platform resource failed");
+destroy_wq:
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+destroy_non_secure_hdl:
+	cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		NULL, cdm_hw);
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
+unlock_release_mem:
+	mutex_unlock(&cdm_hw->hw_mutex);
+release_private_mem:
+	kfree(cdm_hw->soc_info.soc_private);
+release_mem:
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	return rc;
+}
+
+/* Platform remove: tears down the CDM instance created in probe.
+ * Refuses to remove while open_count is non-zero. Returns 0 on success
+ * or -EBUSY/the failing step's error code.
+ */
+int cam_hw_cdm_remove(struct platform_device *pdev)
+{
+	int rc = -EBUSY;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		CAM_ERR(CAM_CDM, "Failed to get dev private data");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		CAM_ERR(CAM_CDM,
+			"Failed to get hw private data for type=%d idx=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		CAM_ERR(CAM_CDM,
+			"Failed to get hw core data for type=%d idx=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	if (cdm_hw->open_count != 0) {
+		CAM_ERR(CAM_CDM, "Hw open count invalid type=%d idx=%d cnt=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx,
+			cdm_hw->open_count);
+		return rc;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Deinit failed for hw");
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
+		return rc;
+	}
+
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		CAM_ERR(CAM_CDM, "Release platform resource failed");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+
+	/* NOTE(review): the SMMU handle is destroyed before the page-fault
+	 * handler is unset, the reverse of probe's error-path order —
+	 * confirm the SMMU API tolerates this ordering.
+	 */
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
+	cam_smmu_unset_client_page_fault_handler(
+		cdm_core->iommu_hdl.non_secure, cdm_hw);
+
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+
+	return 0;
+}
+
+/* Platform driver glue: binds the DT-matched CDM devices to probe/remove */
+static struct platform_driver cam_hw_cdm_driver = {
+	.probe = cam_hw_cdm_probe,
+	.remove = cam_hw_cdm_remove,
+	.driver = {
+		.name = "msm_cam_cdm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_hw_cdm_dt_match,
+		/* Disallow manual bind/unbind via sysfs */
+		.suppress_bind_attrs = true,
+	},
+};
+
+/* Module entry: register the CDM platform driver */
+static int __init cam_hw_cdm_init_module(void)
+{
+	return platform_driver_register(&cam_hw_cdm_driver);
+}
+
+/* Module exit: unregister the CDM platform driver */
+static void __exit cam_hw_cdm_exit_module(void)
+{
+	platform_driver_unregister(&cam_hw_cdm_driver);
+}
+
+module_init(cam_hw_cdm_init_module);
+module_exit(cam_hw_cdm_exit_module);
+MODULE_DESCRIPTION("MSM Camera HW CDM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_intf.c b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_intf.c
new file mode 100644
index 000000000000..871276170ac4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_intf.c
@@ -0,0 +1,582 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_virtual.h"
+#include "cam_soc_util.h"
+#include "cam_cdm_soc.h"
+
+static struct cam_cdm_intf_mgr cdm_mgr;
+static DEFINE_MUTEX(cam_cdm_mgr_lock);
+
+static const struct of_device_id msm_cam_cdm_intf_dt_match[] = {
+ { .compatible = "qcom,cam-cdm-intf", },
+ {}
+};
+
+static int get_cdm_mgr_refcount(void) /* take a ref on the intf mgr; -EPERM until probe completes */
+{
+	int rc = 0;
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
+		rc = -EPERM;
+	} else {
+		CAM_DBG(CAM_CDM, "CDM intf mgr get refcount=%d",
+			cdm_mgr.refcount);
+		cdm_mgr.refcount++; /* balanced by put_cdm_mgr_refcount() */
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+static void put_cdm_mgr_refcount(void) /* drop a ref taken via get_cdm_mgr_refcount() */
+{
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
+	} else {
+		CAM_DBG(CAM_CDM, "CDM intf mgr put refcount=%d",
+			cdm_mgr.refcount);
+		if (cdm_mgr.refcount > 0) {
+			cdm_mgr.refcount--;
+		} else {
+			CAM_ERR(CAM_CDM, "Refcount put when zero");
+			WARN_ON(1); /* imbalance: more puts than gets */
+		}
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+}
+
+static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles,
+	uint32_t hw_idx) /* fill iommu handles from HW node hw_idx via its get_hw_caps op */
+{
+	int rc = -EPERM;
+	struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device; /* caller must pass a valid, registered index */
+
+	if (hw->hw_ops.get_hw_caps) {
+		rc = hw->hw_ops.get_hw_caps(hw->hw_priv, cdm_handles,
+			sizeof(struct cam_iommu_handle));
+	}
+
+	return rc;
+}
+
+static int get_cdm_index_by_id(char *identifier,
+	uint32_t cell_index, uint32_t *hw_index) /* map a dt client label to a registered CDM node index */
+{
+	int rc = -EPERM, i, j;
+	char client_name[128];
+
+	CAM_DBG(CAM_CDM, "Looking for HW id of =%s and index=%d",
+		identifier, cell_index); /* NOTE(review): cell_index is only logged, not matched — confirm intent */
+	snprintf(client_name, sizeof(client_name), "%s", identifier);
+	CAM_DBG(CAM_CDM, "Looking for HW id of %s count:%d", client_name,
+		cdm_mgr.cdm_count);
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d",
+			cdm_mgr.nodes[i].data->dt_num_supported_clients); /* NOTE(review): data not NULL-checked here, unlike cam_cdm_get_iommu_handle() */
+
+		for (j = 0; j <
+			cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
+			CAM_DBG(CAM_CDM, "client name:%s",
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				client_name)) {
+				rc = 0;
+				*hw_index = i; /* first matching node wins */
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+
+	return rc;
+}
+
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles) /* public API: iommu handles for the CDM serving this dt label */
+{
+	int i, j, rc = -EPERM;
+
+	if ((!identifier) || (!cdm_handles))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+	CAM_DBG(CAM_CDM, "Looking for Iommu handle of %s", identifier);
+
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		if (!cdm_mgr.nodes[i].data) { /* skip nodes whose dt data was never registered */
+			mutex_unlock(&cdm_mgr.nodes[i].lock);
+			continue;
+		}
+		for (j = 0; j <
+			cdm_mgr.nodes[i].data->dt_num_supported_clients;
+			j++) {
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				identifier)) {
+				rc = get_cdm_iommu_handle(cdm_handles, i);
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break; /* found and filled; stop scanning */
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_get_iommu_handle);
+
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data) /* acquire a CDM for a client; on success the mgr ref is held until cam_cdm_release() */
+{
+	int rc = -EPERM;
+	struct cam_hw_intf *hw;
+	uint32_t hw_index = 0;
+
+	if ((!data) || (!data->base_array_cnt))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	if (data->id > CAM_CDM_HW_ANY) {
+		CAM_ERR(CAM_CDM,
+			"only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported");
+		rc = -EPERM;
+		goto end;
+	}
+	rc = get_cdm_index_by_id(data->identifier, data->cell_index,
+		&hw_index);
+	if (rc || (hw_index >= CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) { /* fix: reject bad index instead of indexing nodes[] out of bounds */
+		CAM_ERR(CAM_CDM, "Failed to identify associated hw id");
+		goto end;
+	} else {
+		CAM_DBG(CAM_CDM, "hw_index:%d", hw_index);
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+				CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
+				sizeof(struct cam_cdm_acquire_data));
+			if (rc < 0) {
+				CAM_ERR(CAM_CDM, "CDM hw acquire failed");
+				goto end;
+			}
+		} else {
+			CAM_ERR(CAM_CDM, "idx %d doesn't have acquire ops",
+				hw_index);
+			rc = -EPERM;
+		}
+	}
+end:
+	if (rc < 0) { /* only the failure path drops the ref; success keeps it for release */
+		CAM_ERR(CAM_CDM, "CDM acquire failed for id=%d name=%s, idx=%d",
+			data->id, data->identifier, data->cell_index);
+		put_cdm_mgr_refcount();
+	}
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_acquire);
+
+int cam_cdm_release(uint32_t handle) /* release a CDM acquired via cam_cdm_acquire() */
+{
+	uint32_t hw_index;
+	int rc = -EPERM;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+				CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
+				sizeof(handle));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"hw release failed for handle=%x",
+					handle);
+		} else
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
+				hw_index);
+	}
+	put_cdm_mgr_refcount(); /* drop this call's own ref */
+	if (rc == 0)
+		put_cdm_mgr_refcount(); /* on success also drop the ref held since cam_cdm_acquire() */
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_release);
+
+
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data) /* submit a BL request to the CDM identified by handle */
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (!data)
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle); /* handle encodes the node index */
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		struct cam_cdm_hw_intf_cmd_submit_bl req;
+
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			req.data = data;
+			req.handle = handle;
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+				CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
+				sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"hw submit bl failed for handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have submit ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_submit_bls);
+
+int cam_cdm_stream_on(uint32_t handle) /* forward stream-on (clock/power up) to the CDM's start op */
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.start) {
+			rc = hw->hw_ops.start(hw->hw_priv, &handle,
+				sizeof(uint32_t));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"hw start failed handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM,
+				"hw idx %d doesn't have start ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_on);
+
+int cam_cdm_stream_off(uint32_t handle) /* forward stream-off (clock/power down) to the CDM's stop op */
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.stop) {
+			rc = hw->hw_ops.stop(hw->hw_priv, &handle,
+				sizeof(uint32_t));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM, "hw stop failed handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have stop ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_off);
+
+int cam_cdm_reset_hw(uint32_t handle) /* issue a HW reset to the CDM identified by handle */
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+				CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
+				sizeof(handle));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"CDM hw reset failed for handle=%x",
+					handle); /* fix: message said "release" in the reset path */
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have reset ops",
+				hw_index); /* fix: message said "release ops" */
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_reset_hw);
+
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index) /* register a virtual or HW CDM node with the intf manager */
+{
+	int rc = -EINVAL;
+
+	if ((!hw) || (!data) || (!index))
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(!cdm_mgr.nodes[CAM_SW_CDM_INDEX].device)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = hw;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = data;
+		*index = cdm_mgr.cdm_count; /* NOTE(review): assumes virtual registers first so count == CAM_SW_CDM_INDEX — confirm */
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) && (cdm_mgr.cdm_count > 0)) { /* HW CDMs require the virtual node to exist already */
+		mutex_lock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.nodes[cdm_mgr.cdm_count].device = hw;
+		cdm_mgr.nodes[cdm_mgr.cdm_count].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else {
+		CAM_ERR(CAM_CDM, "CDM registration failed type=%d count=%d",
+			type, cdm_mgr.cdm_count);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index) /* remove a previously registered CDM node from the intf manager */
+{
+	int rc = -EINVAL;
+
+	if ((!hw) || (!data))
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(hw == cdm_mgr.nodes[CAM_SW_CDM_INDEX].device) &&
+		(index == CAM_SW_CDM_INDEX)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock); /* fix: was nodes[cdm_mgr.cdm_count].lock — wrong mutex */
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = NULL;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock); /* fix: unlock the same node we locked */
+		cdm_mgr.cdm_count--; /* fix: register incremented count for the virtual node too */
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) &&
+		(hw == cdm_mgr.nodes[index].device)) {
+		mutex_lock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.nodes[index].device = NULL;
+		cdm_mgr.nodes[index].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.cdm_count--;
+		rc = 0;
+	} else {
+		CAM_ERR(CAM_CDM, "CDM Deregistration failed type=%d index=%d",
+			type, index);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+static int cam_cdm_intf_probe(struct platform_device *pdev) /* init the intf manager, then probe the virtual CDM */
+{
+	int i, rc;
+
+	rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to get dt properties");
+		return rc;
+	}
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		mutex_init(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = true; /* gates get/put_cdm_mgr_refcount() */
+	cdm_mgr.refcount = 0;
+	mutex_unlock(&cam_cdm_mgr_lock);
+	rc = cam_virtual_cdm_probe(pdev);
+	if (rc) { /* roll everything back if the virtual CDM fails to probe */
+		mutex_lock(&cam_cdm_mgr_lock);
+		cdm_mgr.probe_done = false;
+		for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+			if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+				(cdm_mgr.nodes[i].refcount != 0))
+				CAM_ERR(CAM_CDM,
+					"Valid node present in index=%d", i);
+			mutex_destroy(&cdm_mgr.nodes[i].lock);
+			cdm_mgr.nodes[i].device = NULL;
+			cdm_mgr.nodes[i].data = NULL;
+			cdm_mgr.nodes[i].refcount = 0;
+		}
+		mutex_unlock(&cam_cdm_mgr_lock);
+	}
+
+	CAM_DBG(CAM_CDM, "CDM Intf probe done");
+
+	return rc;
+}
+
+static int cam_cdm_intf_remove(struct platform_device *pdev) /* tear down the virtual CDM and the intf manager */
+{
+	int i, rc = -EBUSY;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	if (cam_virtual_cdm_remove(pdev)) {
+		CAM_ERR(CAM_CDM, "Virtual CDM remove failed");
+		put_cdm_mgr_refcount(); /* fix: old path jumped to end, unlocking an unheld mutex and leaking this ref */
+		return rc;
+	}
+	put_cdm_mgr_refcount();
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.refcount != 0) {
+		CAM_ERR(CAM_CDM, "cdm manger refcount not zero %d",
+			cdm_mgr.refcount);
+		goto end;
+	}
+
+	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+			(cdm_mgr.nodes[i].refcount != 0)) {
+			CAM_ERR(CAM_CDM, "Valid node present in index=%d", i);
+			goto end; /* fix: old path unlocked here AND at end — double unlock */
+		}
+		mutex_destroy(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = false;
+	rc = 0;
+
+end:
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+static struct platform_driver cam_cdm_intf_driver = {
+ .probe = cam_cdm_intf_probe,
+ .remove = cam_cdm_intf_remove,
+ .driver = {
+ .name = "msm_cam_cdm_intf",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cam_cdm_intf_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_cdm_intf_init_module(void)
+{
+ return platform_driver_register(&cam_cdm_intf_driver);
+}
+
+static void __exit cam_cdm_intf_exit_module(void)
+{
+ platform_driver_unregister(&cam_cdm_intf_driver);
+}
+
+module_init(cam_cdm_intf_init_module);
+module_exit(cam_cdm_intf_exit_module);
+MODULE_DESCRIPTION("MSM Camera CDM Intf driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_intf_api.h b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_intf_api.h
new file mode 100644
index 000000000000..6aa6e6d2bc4c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_intf_api.h
@@ -0,0 +1,209 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_API_H_
+#define _CAM_CDM_API_H_
+
+#include <media/cam_defs.h>
+#include "cam_cdm_util.h"
+#include "cam_soc_util.h"
+
+/* enum cam_cdm_id - Enum for possible CAM CDM hardwares */
+enum cam_cdm_id {
+ CAM_CDM_VIRTUAL,
+ CAM_CDM_HW_ANY,
+ CAM_CDM_CPAS_0,
+ CAM_CDM_IPE0,
+ CAM_CDM_IPE1,
+ CAM_CDM_BPS,
+ CAM_CDM_VFE,
+ CAM_CDM_MAX
+};
+
+/* enum cam_cdm_cb_status - Enum for possible CAM CDM callback */
+enum cam_cdm_cb_status {
+ CAM_CDM_CB_STATUS_BL_SUCCESS,
+ CAM_CDM_CB_STATUS_INVALID_BL_CMD,
+ CAM_CDM_CB_STATUS_PAGEFAULT,
+ CAM_CDM_CB_STATUS_HW_RESET_ONGOING,
+ CAM_CDM_CB_STATUS_HW_RESET_DONE,
+ CAM_CDM_CB_STATUS_UNKNOWN_ERROR,
+};
+
+/* enum cam_cdm_bl_cmd_addr_type - Enum for possible CDM bl cmd addr types */
+enum cam_cdm_bl_cmd_addr_type {
+ CAM_CDM_BL_CMD_TYPE_MEM_HANDLE,
+ CAM_CDM_BL_CMD_TYPE_HW_IOVA,
+ CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA,
+};
+
+/**
+ * struct cam_cdm_acquire_data - Cam CDM acquire data structure
+ *
+ * @identifier : Input identifier string which is the device label from dt
+ * like vfe, ife, jpeg etc
+ * @cell_index : Input integer identifier pointing to the cell index from dt
+ * of the device. This can be used to form a unique string
+ * with @identifier like vfe0, ife1, jpeg0 etc
+ * @id : ID of a specific or any CDM HW which needs to be acquired.
+ * @userdata : Input private data which will be returned as part
+ * of callback.
+ * @cam_cdm_callback : Input callback pointer for triggering the
+ * callbacks from CDM driver
+ * @handle : CDM Client handle
+ * @userdata : Private data given at the time of acquire
+ * @status : Callback status
+ * @cookie : Cookie if the callback is gen irq status
+ * @base_array_cnt : Input number of ioremapped address pair pointing
+ * in base_array, needed only if selected cdm is a virtual.
+ * @base_array : Input pointer to ioremapped address pair array,
+ * needed only if selected cdm is a virtual.
+ * @cdm_version : CDM version is output while acquiring HW cdm and
+ * it is Input while acquiring virtual cdm, Currently fixing it
+ * to one version below acquire API.
+ * @ops : Output pointer updated by cdm driver to the CDM
+ * util ops for this HW version of CDM acquired.
+ * @handle : Output Unique handle generated for this acquire
+ *
+ */
+struct cam_cdm_acquire_data {
+ char identifier[128];
+ uint32_t cell_index;
+ enum cam_cdm_id id;
+ void *userdata;
+ void (*cam_cdm_callback)(uint32_t handle, void *userdata,
+ enum cam_cdm_cb_status status, uint64_t cookie);
+ uint32_t base_array_cnt;
+ struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK];
+ struct cam_hw_version cdm_version;
+ struct cam_cdm_utils_ops *ops;
+ uint32_t handle;
+};
+
+/**
+ * struct cam_cdm_bl_cmd - Cam CDM HW bl command
+ *
+ * @bl_addr : Union of all three type for CDM BL commands
+ * @mem_handle : Input mem handle of bl cmd
+ * @offset : Input offset of the actual bl cmd in the memory pointed
+ * by mem_handle
+ * @len : Input length of the BL command; cannot be more than 1MB and
+ * will be validated against offset+size of the memory pointed
+ * to by mem_handle
+ *
+ */
+struct cam_cdm_bl_cmd {
+ union {
+ int32_t mem_handle;
+ uint32_t *hw_iova;
+ uintptr_t kernel_iova;
+ } bl_addr;
+ uint32_t offset;
+ uint32_t len;
+};
+
+/**
+ * struct cam_cdm_bl_request - Cam CDM HW base & length (BL) request
+ *
+ * @flag : 1 for callback needed and 0 for no callback when this BL
+ * request is done
+ * @userdata :Input private data which will be returned as part
+ * of callback if request for this bl request in flags.
+ * @cookie : Cookie if the callback is gen irq status
+ * @type : type of the submitted bl cmd address.
+ * @cmd_arrary_count : Input number of BL commands to be submitted to CDM
+ * @bl_cmd_array : Input payload holding the array of BL cmds
+ * to be submitted.
+ *
+ */
+struct cam_cdm_bl_request {
+ int flag;
+ void *userdata;
+ uint64_t cookie;
+ enum cam_cdm_bl_cmd_addr_type type;
+ uint32_t cmd_arrary_count;
+ struct cam_cdm_bl_cmd cmd[1];
+};
+
+/**
+ * @brief : API to get the CDM capabilities for a camera device type
+ *
+ * @identifier : Input pointer to a string which is the device label from dt
+ * like vfe, ife, jpeg etc, We do not need cell index
+ * assuming all devices of a single type maps to one SMMU
+ * client
+ * @cdm_handles : Input iommu handle memory pointer to update handles
+ *
+ * @return 0 on success
+ */
+int cam_cdm_get_iommu_handle(char *identifier,
+ struct cam_iommu_handle *cdm_handles);
+
+/**
+ * @brief : API to acquire a CDM
+ *
+ * @data : Input data for the CDM to be acquired
+ *
+ * @return 0 on success
+ */
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data);
+
+/**
+ * @brief : API to release a previously acquired CDM
+ *
+ * @handle : Input handle for the CDM to be released
+ *
+ * @return 0 on success
+ */
+int cam_cdm_release(uint32_t handle);
+
+/**
+ * @brief : API to submit the base & length (BL's) for acquired CDM
+ *
+ * @handle : Input cdm handle to which the BL's need to be submitted.
+ * @data : Input pointer to the BL's to be submitted
+ *
+ * @return 0 on success
+ */
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data);
+
+/**
+ * @brief : API to stream ON a previously acquired CDM,
+ * during this we turn on/off clocks/power based on active clients.
+ *
+ * @handle : Input handle for the CDM to be released
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_on(uint32_t handle);
+
+/**
+ * @brief : API to stream OFF a previously acquired CDM,
+ * during this we turn on/off clocks/power based on active clients.
+ *
+ * @handle : Input handle for the CDM to be released
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_off(uint32_t handle);
+
+/**
+ * @brief : API to reset previously acquired CDM,
+ * this can be only performed only the CDM is private.
+ *
+ * @handle : Input handle of the CDM to reset
+ *
+ * @return 0 on success
+ */
+int cam_cdm_reset_hw(uint32_t handle);
+
+#endif /* _CAM_CDM_API_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_soc.c b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_soc.c
new file mode 100644
index 000000000000..6e1ae20570a6
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_soc.c
@@ -0,0 +1,208 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_OFFSET_FROM_REG(x, y) ((x)->offsets[y].offset)
+#define CAM_CDM_ATTR_FROM_REG(x, y) ((x)->offsets[y].attribute)
+
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value) /* read a CDM register; returns true on error (value set to 0) */
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CAM_DBG(CAM_CDM, "E: b=%pK blen=%d reg=%x off=%x", (void __iomem *)base,
+		(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
+		reg)));
+	CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x", (void __iomem *)base,
+		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)));
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid reg=%d\n", reg);
+		goto permission_error;
+	} else {
+		reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)));
+		if ((reg_addr + sizeof(uint32_t)) > (base + mem_len)) { /* fix: full 32-bit read must fit inside the mapping */
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Invalid mapped region %d", reg);
+			goto permission_error;
+		}
+		*value = cam_io_r_mb(reg_addr);
+		CAM_DBG(CAM_CDM, "X b=%pK reg=%x off=%x val=%x",
+			(void __iomem *)base, reg,
+			(CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)),
+			*value);
+		return false;
+	}
+permission_error:
+	*value = 0;
+	return true;
+
+}
+
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value) /* write a CDM register; returns true on error */
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x val=%x", (void __iomem *)base,
+		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "CDM accessing invalid reg=%d\n",
+			reg);
+		goto permission_error;
+	} else {
+		reg_addr = (base + CAM_CDM_OFFSET_FROM_REG(
+			cdm->offset_tbl, reg));
+		if ((reg_addr + sizeof(uint32_t)) > (base + mem_len)) { /* fix: full 32-bit write must fit inside the mapping */
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Accessing invalid region %d:%d\n",
+				reg, (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)));
+			goto permission_error;
+		}
+		cam_io_w_mb(value, reg_addr);
+		return false;
+	}
+permission_error:
+	return true;
+
+}
+
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr) /* parse cdm-client-names from dt into ptr */
+{
+	int i, rc = -EINVAL;
+
+	ptr->dt_num_supported_clients = of_property_count_strings(
+		pdev->dev.of_node,
+		"cdm-client-names"); /* negative when the property is absent */
+	CAM_DBG(CAM_CDM, "Num supported cdm_client = %d",
+		ptr->dt_num_supported_clients);
+	if (ptr->dt_num_supported_clients >
+		CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
+		CAM_ERR(CAM_CDM, "Invalid count of client names count=%d",
+			ptr->dt_num_supported_clients);
+		rc = -EINVAL;
+		return rc;
+	}
+	if (ptr->dt_num_supported_clients < 0) {
+		CAM_DBG(CAM_CDM, "No cdm client names found");
+		ptr->dt_num_supported_clients = 0;
+		ptr->dt_cdm_shared = false;
+	} else {
+		ptr->dt_cdm_shared = true;
+	}
+	for (i = 0; i < ptr->dt_num_supported_clients; i++) { /* NOTE(review): with 0 clients the loop never runs and rc stays -EINVAL — confirm intended */
+		rc = of_property_read_string_index(pdev->dev.of_node,
+			"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
+		CAM_DBG(CAM_CDM, "cdm-client-names[%d] = %s", i,
+			ptr->dt_cdm_client_name[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_CDM, "Reading cdm-client-names failed");
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table) /* load soc + private dt data and bind the reg offset table */
+{
+	int rc;
+	struct cam_hw_soc_info *soc_ptr;
+	const struct of_device_id *id;
+
+	if (!cdm_hw || (cdm_hw->soc_info.soc_private)
+		|| !(cdm_hw->soc_info.pdev)) /* soc_private must not already be set */
+		return -EINVAL;
+
+	soc_ptr = &cdm_hw->soc_info;
+
+	rc = cam_soc_util_get_dt_properties(soc_ptr);
+	if (rc != 0) {
+		CAM_ERR(CAM_CDM, "Failed to retrieve the CDM dt properties");
+	} else {
+		soc_ptr->soc_private = kzalloc(
+			sizeof(struct cam_cdm_private_dt_data),
+			GFP_KERNEL);
+		if (!soc_ptr->soc_private)
+			return -ENOMEM;
+
+		rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
+			soc_ptr->soc_private);
+		if (rc != 0) {
+			CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
+			goto error;
+		}
+		id = of_match_node(table, soc_ptr->pdev->dev.of_node);
+		if ((!id) || !(id->data)) { /* id->data carries the per-variant reg offset table */
+			CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table");
+			goto error;
+		}
+		CAM_DBG(CAM_CDM, "CDM Hw Id compatible =%s", id->compatible);
+		((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
+			(struct cam_cdm_reg_offset_table *)id->data;
+		strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
+			id->compatible,
+			sizeof(((struct cam_cdm *)cdm_hw->core_info)->name));
+	}
+
+	return rc;
+
+error:
+	rc = -EINVAL;
+	kfree(soc_ptr->soc_private); /* undo the allocation above */
+	soc_ptr->soc_private = NULL;
+	return rc;
+}
+
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev, struct cam_cdm_intf_mgr *mgr) /* read the num-hw-cdm dt count into the manager */
+{
+	int rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"num-hw-cdm", &mgr->dt_supported_hw_cdm);
+	CAM_DBG(CAM_CDM, "Number of HW cdm supported =%d",
+		mgr->dt_supported_hw_cdm);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_soc.h b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_soc.h
new file mode 100644
index 000000000000..9a018617dad9
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_soc.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_SOC_H_
+#define _CAM_CDM_SOC_H_
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+ const struct of_device_id *table);
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+ enum cam_cdm_regs reg, uint32_t *value);
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+ enum cam_cdm_regs reg, uint32_t value);
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+ struct platform_device *pdev,
+ struct cam_cdm_intf_mgr *mgr);
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+ struct cam_cdm_private_dt_data *ptr);
+
+#endif /* _CAM_CDM_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_util.c
new file mode 100644
index 000000000000..a97a51965ae3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_util.c
@@ -0,0 +1,716 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_DWORD 4
+
+#define CAM_CDM_SW_CMD_COUNT 2
+#define CAM_CMD_LENGTH_MASK 0xFFFF
+#define CAM_CDM_COMMAND_OFFSET 24
+#define CAM_CDM_REG_OFFSET_MASK 0x00FFFFFF
+
+#define CAM_CDM_DMI_DATA_HI_OFFSET 8
+#define CAM_CDM_DMI_DATA_LO_OFFSET 12
+
/*
 * Header size, in 32-bit dwords, of each CDM command, indexed by
 * enum cam_cdm_command. The two trailing SW-command slots are
 * zero-initialized. NOTE(review): CAM_CDM_CMD_SWD_DMI_32/64 (0x70/0x71)
 * are far beyond this table's bounds and must never be used as indices.
 */
static unsigned int CDMCmdHeaderSizes[
	CAM_CDM_CMD_PRIVATE_BASE + CAM_CDM_SW_CMD_COUNT] = {
	0, /* UNUSED */
	3, /* DMI */
	0, /* UNUSED */
	2, /* RegContinuous */
	1, /* RegRandom */
	2, /* BUFF_INDIRECT */
	2, /* GenerateIRQ */
	3, /* WaitForEvent */
	1, /* ChangeBase */
	1, /* PERF_CONTROL */
	3, /* DMI32 */
	3, /* DMI64 */
};
+
+/**
+ * struct cdm_regrandom_cmd - Definition for CDM random register command.
+ * @count: Number of register writes
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_regrandom_cmd {
+ unsigned int count : 16;
+ unsigned int reserved : 8;
+ unsigned int cmd : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_regcontinuous_cmd - Definition for a CDM register range command.
+ * @count: Number of register writes
+ * @reserved0: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @offset: Start address of the range of registers
+ * @reserved1: reserved bits
+ */
+struct cdm_regcontinuous_cmd {
+ unsigned int count : 16;
+ unsigned int reserved0 : 8;
+ unsigned int cmd : 8;
+ unsigned int offset : 24;
+ unsigned int reserved1 : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_dmi_cmd - Definition for a CDM DMI command.
+ * @length: Number of bytes in LUT - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr: Address of the LUT in memory
+ * @DMIAddr: Address of the target DMI config register
+ * @DMISel: DMI identifier
+ */
+struct cdm_dmi_cmd {
+ unsigned int length : 16;
+ unsigned int reserved : 8;
+ unsigned int cmd : 8;
+ unsigned int addr;
+ unsigned int DMIAddr : 24;
+ unsigned int DMISel : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_indirect_cmd - Definition for a CDM indirect buffer command.
+ * @length: Number of bytes in buffer - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr: Device address of the indirect buffer
+ */
+struct cdm_indirect_cmd {
+ unsigned int length : 16;
+ unsigned int reserved : 8;
+ unsigned int cmd : 8;
+ unsigned int addr;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_changebase_cmd - Definition for CDM base address change command.
+ * @base: Base address to be changed to
+ * @cmd:Command ID (CDMCmd)
+ */
+struct cdm_changebase_cmd {
+ unsigned int base : 24;
+ unsigned int cmd : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_wait_event_cmd - Definition for a CDM wait-event command.
+ * @mask: Mask for the events
+ * @id: ID to read back for debug
+ * @iw_reserved: reserved bits
+ * @iw: iw AHB write bit
+ * @cmd:Command ID (CDMCmd)
+ * @offset: Offset to where data is written
+ * @offset_reserved: reserved bits
+ * @data: data returned in IRQ_USR_DATA
+ */
+struct cdm_wait_event_cmd {
+ unsigned int mask : 8;
+ unsigned int id : 8;
+ unsigned int iw_reserved : 7;
+ unsigned int iw : 1;
+ unsigned int cmd : 8;
+ unsigned int offset : 24;
+ unsigned int offset_reserved : 8;
+ unsigned int data;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_genirq_cmd - Definition for a CDM generate-IRQ command.
+ * @reserved: reserved bits
+ * @cmd:Command ID (CDMCmd)
+ * @userdata: userdata returned in IRQ_USR_DATA
+ */
+struct cdm_genirq_cmd {
+ unsigned int reserved : 24;
+ unsigned int cmd : 8;
+ unsigned int userdata;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_perf_ctrl_cmd - Definition for CDM perf control command.
+ * @perf: perf command
+ * @reserved: reserved bits
+ * @cmd:Command ID (CDMCmd)
+ */
+struct cdm_perf_ctrl_cmd {
+ unsigned int perf : 2;
+ unsigned int reserved : 22;
+ unsigned int cmd : 8;
+} __attribute__((__packed__));
+
+uint32_t cdm_get_cmd_header_size(unsigned int command)
+{
+ return CDMCmdHeaderSizes[command];
+}
+
/* Size in dwords of a reg-continuous command: header + @numVals values. */
uint32_t cdm_required_size_reg_continuous(uint32_t numVals)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT) + numVals;
}

/* Size in dwords of a reg-random command: header + @numRegVals
 * (offset, value) pairs. */
uint32_t cdm_required_size_reg_random(uint32_t numRegVals)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM) +
		(2 * numRegVals);
}

/* Header-only size in dwords of a DMI command. */
uint32_t cdm_required_size_dmi(void)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
}

/* Header-only size in dwords of a generate-IRQ command. */
uint32_t cdm_required_size_genirq(void)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ);
}

/* Header-only size in dwords of a buffer-indirect command. */
uint32_t cdm_required_size_indirect(void)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
}

/* Header-only size in dwords of a change-base command. */
uint32_t cdm_required_size_changebase(void)
{
	return cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
}

/* Byte offset of the LUT address field inside a DMI command header. */
uint32_t cdm_offsetof_dmi_addr(void)
{
	return offsetof(struct cdm_dmi_cmd, addr);
}

/* Byte offset of the address field inside an indirect command header. */
uint32_t cdm_offsetof_indirect_addr(void)
{
	return offsetof(struct cdm_indirect_cmd, addr);
}
+
+uint32_t *cdm_write_regcontinuous(uint32_t *pCmdBuffer, uint32_t reg,
+ uint32_t numVals, uint32_t *pVals)
+{
+ uint32_t i;
+ struct cdm_regcontinuous_cmd *pHeader =
+ (struct cdm_regcontinuous_cmd *)pCmdBuffer;
+
+ pHeader->count = numVals;
+ pHeader->cmd = CAM_CDM_CMD_REG_CONT;
+ pHeader->reserved0 = 0;
+ pHeader->reserved1 = 0;
+ pHeader->offset = reg;
+
+ pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+
+ for (i = 0; i < numVals; i++)
+ (((uint32_t *)pCmdBuffer)[i]) = (((uint32_t *)pVals)[i]);
+
+ pCmdBuffer += numVals;
+
+ return pCmdBuffer;
+}
+
+uint32_t *cdm_write_regrandom(uint32_t *pCmdBuffer, uint32_t numRegVals,
+ uint32_t *pRegVals)
+{
+ uint32_t i;
+ uint32_t *dst, *src;
+ struct cdm_regrandom_cmd *pHeader =
+ (struct cdm_regrandom_cmd *)pCmdBuffer;
+
+ pHeader->count = numRegVals;
+ pHeader->cmd = CAM_CDM_CMD_REG_RANDOM;
+ pHeader->reserved = 0;
+
+ pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+ dst = pCmdBuffer;
+ src = pRegVals;
+ for (i = 0; i < numRegVals; i++) {
+ *dst++ = *src++;
+ *dst++ = *src++;
+ }
+
+ return dst;
+}
+
/**
 * cdm_write_dmi() - Append a DMI command to the command buffer.
 * @pCmdBuffer:    Pointer into the command buffer.
 * @dmiCmd:        DMI command ID (DMI/DMI_32/DMI_64 variants).
 * @DMIAddr:       Target DMI config register address.
 * @DMISel:        DMI bank select.
 * @dmiBufferAddr: Device address of the LUT data.
 * @length:        LUT size in bytes; stored as length - 1, so callers
 *                 must pass length >= 1.
 *
 * Return: pointer just past the written command header.
 */
uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
	uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr,
	uint32_t length)
{
	struct cdm_dmi_cmd *pHeader = (struct cdm_dmi_cmd *)pCmdBuffer;

	pHeader->cmd = dmiCmd;
	pHeader->addr = dmiBufferAddr;
	pHeader->length = length - 1;
	pHeader->DMIAddr = DMIAddr;
	pHeader->DMISel = DMISel;

	/* All DMI variants share the same 3-dword header size. */
	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);

	return pCmdBuffer;
}
+
/**
 * cdm_write_indirect() - Append a buffer-indirect command to the
 * command buffer.
 * @pCmdBuffer:      Pointer into the command buffer.
 * @indirectBufAddr: Device address of the indirect command buffer.
 * @length:          Buffer size in bytes; stored as length - 1, so
 *                   callers must pass length >= 1.
 *
 * Return: pointer just past the written command header.
 */
uint32_t *cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr,
	uint32_t length)
{
	struct cdm_indirect_cmd *pHeader =
		(struct cdm_indirect_cmd *)pCmdBuffer;

	pHeader->cmd = CAM_CDM_CMD_BUFF_INDIRECT;
	pHeader->addr = indirectBufAddr;
	pHeader->length = length - 1;

	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);

	return pCmdBuffer;
}
+
+uint32_t *cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base)
+{
+ struct cdm_changebase_cmd *pHeader =
+ (struct cdm_changebase_cmd *)pCmdBuffer;
+
+ pHeader->cmd = CAM_CDM_CMD_CHANGE_BASE;
+ pHeader->base = base;
+ pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+
+ return pCmdBuffer;
+}
+
/**
 * cdm_write_genirq() - Write a generate-IRQ command header.
 * @pCmdBuffer: Pointer into the command buffer.
 * @userdata:   Cookie returned by hardware in IRQ_USR_DATA on the IRQ.
 *
 * Note: unlike the other writers this does not advance or return the
 * buffer pointer; callers account for the command size themselves.
 */
void cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata)
{
	struct cdm_genirq_cmd *pHeader = (struct cdm_genirq_cmd *)pCmdBuffer;

	pHeader->cmd = CAM_CDM_CMD_GEN_IRQ;
	pHeader->userdata = userdata;
}
+
+struct cam_cdm_utils_ops CDM170_ops = {
+ cdm_get_cmd_header_size,
+ cdm_required_size_reg_continuous,
+ cdm_required_size_reg_random,
+ cdm_required_size_dmi,
+ cdm_required_size_genirq,
+ cdm_required_size_indirect,
+ cdm_required_size_changebase,
+ cdm_offsetof_dmi_addr,
+ cdm_offsetof_indirect_addr,
+ cdm_write_regcontinuous,
+ cdm_write_regrandom,
+ cdm_write_dmi,
+ cdm_write_indirect,
+ cdm_write_changebase,
+ cdm_write_genirq,
+};
+
+int cam_cdm_get_ioremap_from_base(uint32_t hw_base,
+ uint32_t base_array_size,
+ struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+ void __iomem **device_base)
+{
+ int ret = -EINVAL, i;
+
+ for (i = 0; i < base_array_size; i++) {
+ if (base_table[i])
+ CAM_DBG(CAM_CDM, "In loop %d ioremap for %x addr=%x",
+ i, (base_table[i])->mem_cam_base, hw_base);
+ if ((base_table[i]) &&
+ ((base_table[i])->mem_cam_base == hw_base)) {
+ *device_base = (base_table[i])->mem_base;
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
/*
 * Commit a reg-continuous command with the CPU: copy the payload to
 * base_addr + offset via cam_io_memcpy().
 *
 * NOTE(review): the size check adds a header size in dwords to a payload
 * size in bytes, while *used_bytes multiplies the header by 4; confirm
 * the intended unit of cmd_buf_size against the callers.
 *
 * Returns 0 on success, -EINVAL for a NULL base or a command that does
 * not fit in cmd_buf_size. On success *used_bytes is the total bytes
 * consumed (header + payload).
 */
static int cam_cdm_util_reg_cont_write(void __iomem *base_addr,
	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
{
	int ret = 0;
	uint32_t *data;
	struct cdm_regcontinuous_cmd *reg_cont;

	if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
		(!base_addr)) {
		CAM_ERR(CAM_CDM, "invalid base addr and data length %d %pK",
			cmd_buf_size, base_addr);
		return -EINVAL;
	}

	reg_cont = (struct cdm_regcontinuous_cmd *)cmd_buf;
	/* count is a 16-bit field (max 0xFFFF), so the > 0x10000 guard can
	 * never fire; the fit-in-buffer check is the effective bound. */
	if ((!reg_cont->count) || (reg_cont->count > 0x10000) ||
		(((reg_cont->count * sizeof(uint32_t)) +
			cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
		cmd_buf_size)) {
		CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count%d",
			cmd_buf_size, reg_cont->count);
		return -EINVAL;
	}
	/* Payload starts right after the header. */
	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
	cam_io_memcpy(base_addr + reg_cont->offset, data,
		reg_cont->count * sizeof(uint32_t));

	*used_bytes = (reg_cont->count * sizeof(uint32_t)) +
		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT));

	return ret;
}
+
/*
 * Commit a reg-random command with the CPU: each (offset, value) pair in
 * the payload is written individually via cam_io_w().
 *
 * NOTE(review): as with the reg-continuous path, the size check mixes a
 * byte-denominated payload with a dword-denominated header (and
 * *used_bytes multiplies the header by 4); confirm the intended unit of
 * cmd_buf_size against the callers.
 *
 * Returns 0 on success, -EINVAL for a NULL base or a command that does
 * not fit in cmd_buf_size; *used_bytes reports total bytes consumed.
 */
static int cam_cdm_util_reg_random_write(void __iomem *base_addr,
	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
{
	uint32_t i;
	struct cdm_regrandom_cmd *reg_random;
	uint32_t *data;

	if (!base_addr) {
		CAM_ERR(CAM_CDM, "invalid base address");
		return -EINVAL;
	}

	reg_random = (struct cdm_regrandom_cmd *) cmd_buf;
	/* count is a 16-bit field, so the > 0x10000 guard can never fire. */
	if ((!reg_random->count) || (reg_random->count > 0x10000) ||
		(((reg_random->count * (sizeof(uint32_t) * 2)) +
		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
			cmd_buf_size)) {
		CAM_ERR(CAM_CDM, "invalid reg_count %d cmd_buf_size %d",
			reg_random->count, cmd_buf_size);
		return -EINVAL;
	}
	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);

	for (i = 0; i < reg_random->count; i++) {
		/* data[0] = register offset, data[1] = value to write. */
		CAM_DBG(CAM_CDM, "reg random: offset %pK, value 0x%x",
			((void __iomem *)(base_addr + data[0])),
			data[1]);
		cam_io_w(data[1], base_addr + data[0]);
		data += 2;
	}

	*used_bytes = ((reg_random->count * (sizeof(uint32_t) * 2)) +
		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));

	return 0;
}
+
/*
 * Commit a SW DMI command with the CPU. The header's length field holds
 * (payload bytes - 1). For CAM_CDM_CMD_SWD_DMI_64 each 8-byte element is
 * written as a LO then HI data-register pair; for the 32-bit variant each
 * 4-byte element goes to the LO data register only.
 *
 * NOTE(review): the size check compares cmd_buf_size against a header in
 * dwords plus a payload in bytes, while *used_bytes multiplies the header
 * by 4; confirm the intended unit of cmd_buf_size against the callers.
 *
 * Returns 0 on success, -EINVAL when the payload does not fit.
 */
static int cam_cdm_util_swd_dmi_write(uint32_t cdm_cmd_type,
	void __iomem *base_addr, uint32_t *cmd_buf, uint32_t cmd_buf_size,
	uint32_t *used_bytes)
{
	uint32_t i;
	struct cdm_dmi_cmd *swd_dmi;
	uint32_t *data;

	swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;

	if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
		CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
			swd_dmi->length + 1);
		return -EINVAL;
	}
	/* Payload follows the 3-dword DMI header. */
	data = cmd_buf + cdm_required_size_dmi();

	if (cdm_cmd_type == CAM_CDM_CMD_SWD_DMI_64) {
		for (i = 0; i < (swd_dmi->length + 1)/8; i++) {
			cam_io_w_mb(data[0], base_addr +
				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
			cam_io_w_mb(data[1], base_addr +
				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET);
			data += 2;
		}
	} else {
		for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
			cam_io_w_mb(data[0], base_addr +
				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
			data += 1;
		}
	}
	*used_bytes = (4 * cdm_required_size_dmi()) + swd_dmi->length + 1;

	return 0;
}
+
/*
 * cam_cdm_util_cmd_buf_write() - Parse a CDM command stream and commit it
 * to hardware with CPU writes (the virtual-CDM submission path).
 * @current_device_base: In/out; CHANGE_BASE commands update it via
 *                       cam_cdm_get_ioremap_from_base().
 * @cmd_buf:             Command stream (dwords).
 * @cmd_buf_size:        Remaining size of @cmd_buf (consumed as the
 *                       helpers report *used_bytes).
 * @base_table:          Register-map table for base resolution.
 * @base_array_size:     Number of valid entries in @base_table.
 * @bl_tag:              BL tag of this buffer (currently unused here).
 *
 * Only REG_CONT, REG_RANDOM, SWD_DMI_32/64 and CHANGE_BASE commands are
 * handled; any other opcode aborts the walk with -EINVAL.
 */
int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
	uint32_t *cmd_buf, uint32_t cmd_buf_size,
	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
	uint32_t base_array_size, uint8_t bl_tag)
{
	int ret = 0;
	uint32_t cdm_cmd_type = 0, total_cmd_buf_size = 0;
	uint32_t used_bytes = 0;

	total_cmd_buf_size = cmd_buf_size;

	while (cmd_buf_size > 0) {
		CAM_DBG(CAM_CDM, "cmd data=%x", *cmd_buf);
		/* Opcode lives in the top byte of the first dword. */
		cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
		switch (cdm_cmd_type) {
		case CAM_CDM_CMD_REG_CONT: {
			ret = cam_cdm_util_reg_cont_write(*current_device_base,
				cmd_buf, cmd_buf_size, &used_bytes);
			if (ret)
				break;

			/* Advance by bytes consumed (cmd_buf is a dword ptr). */
			if (used_bytes > 0) {
				cmd_buf_size -= used_bytes;
				cmd_buf += used_bytes/4;
			}
		}
		break;
		case CAM_CDM_CMD_REG_RANDOM: {
			ret = cam_cdm_util_reg_random_write(
				*current_device_base, cmd_buf, cmd_buf_size,
				&used_bytes);
			if (ret)
				break;

			if (used_bytes > 0) {
				cmd_buf_size -= used_bytes;
				cmd_buf += used_bytes / 4;
			}
		}
		break;
		case CAM_CDM_CMD_SWD_DMI_32:
		case CAM_CDM_CMD_SWD_DMI_64: {
			/* DMI needs a resolved base; a prior CHANGE_BASE must
			 * have set *current_device_base. */
			if (*current_device_base == 0) {
				CAM_ERR(CAM_CDM,
					"Got SWI DMI cmd =%d for invalid hw",
					cdm_cmd_type);
				ret = -EINVAL;
				break;
			}
			ret = cam_cdm_util_swd_dmi_write(cdm_cmd_type,
				*current_device_base, cmd_buf, cmd_buf_size,
				&used_bytes);
			if (ret)
				break;

			if (used_bytes > 0) {
				cmd_buf_size -= used_bytes;
				cmd_buf += used_bytes / 4;
			}
		}
		break;
		case CAM_CDM_CMD_CHANGE_BASE: {
			struct cdm_changebase_cmd *change_base_cmd =
				(struct cdm_changebase_cmd *)cmd_buf;

			ret = cam_cdm_get_ioremap_from_base(
				change_base_cmd->base, base_array_size,
				base_table, current_device_base);
			if (ret != 0) {
				CAM_ERR(CAM_CDM,
					"Get ioremap change base failed %x",
					change_base_cmd->base);
				break;
			}
			CAM_DBG(CAM_CDM, "Got ioremap for %x addr=%pK",
				change_base_cmd->base,
				current_device_base);
			cmd_buf_size -= (4 *
				cdm_required_size_changebase());
			cmd_buf += cdm_required_size_changebase();
		}
		break;
		default:
			CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type type 0%x",
				cdm_cmd_type);
			ret = -EINVAL;
			break;
		}

		/* Any helper failure aborts the remaining stream. */
		if (ret < 0)
			break;
	}

	return ret;
}
+
/* Log a DMI command; returns dwords consumed (header only, no payload
 * decode). @cmd_buf_addr is unused by design. */
static long cam_cdm_util_dump_dmi_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;

	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI];
	CAM_INFO(CAM_CDM, "DMI");
	return ret;
}

/* Log a buffer-indirect command; returns dwords consumed (header only). */
static long cam_cdm_util_dump_buff_indirect(uint32_t *cmd_buf_addr)
{
	long ret = 0;

	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT];
	CAM_INFO(CAM_CDM, "Buff Indirect");
	return ret;
}
+
/* Log a reg-continuous command and its payload values; returns total
 * dwords consumed (header + count values). */
static long cam_cdm_util_dump_reg_cont_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;
	struct cdm_regcontinuous_cmd *p_regcont_cmd;
	uint32_t *temp_ptr = cmd_buf_addr;
	int i = 0;

	p_regcont_cmd = (struct cdm_regcontinuous_cmd *)temp_ptr;
	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];
	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];

	CAM_INFO(CAM_CDM, "REG_CONT: COUNT: %u OFFSET: 0x%X",
		p_regcont_cmd->count, p_regcont_cmd->offset);

	for (i = 0; i < p_regcont_cmd->count; i++) {
		CAM_INFO(CAM_CDM, "DATA_%d: 0x%X", i,
			*temp_ptr);
		temp_ptr++;
		ret++;
	}

	return ret;
}

/* Log a reg-random command and its (offset, value) pairs; returns total
 * dwords consumed (header + 2 * count). */
static long cam_cdm_util_dump_reg_random_cmd(uint32_t *cmd_buf_addr)
{
	struct cdm_regrandom_cmd *p_regrand_cmd;
	uint32_t *temp_ptr = cmd_buf_addr;
	long ret = 0;
	int i = 0;

	p_regrand_cmd = (struct cdm_regrandom_cmd *)temp_ptr;
	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];
	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];

	CAM_INFO(CAM_CDM, "REG_RAND: COUNT: %u",
		p_regrand_cmd->count);

	for (i = 0; i < p_regrand_cmd->count; i++) {
		CAM_INFO(CAM_CDM, "OFFSET_%d: 0x%X DATA_%d: 0x%X",
			i, *temp_ptr & CAM_CDM_REG_OFFSET_MASK, i,
			*(temp_ptr + 1));
		temp_ptr += 2;
		ret += 2;
	}

	return ret;
}
+
/* Log a generate-IRQ command; returns dwords consumed (header only). */
static long cam_cdm_util_dump_gen_irq_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;

	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_GEN_IRQ];

	CAM_INFO(CAM_CDM, "GEN_IRQ");

	return ret;
}

/* Log a wait-event command; returns dwords consumed (header only). */
static long cam_cdm_util_dump_wait_event_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;

	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_WAIT_EVENT];

	CAM_INFO(CAM_CDM, "WAIT_EVENT");

	return ret;
}

/* Log a change-base command with its target base; returns dwords
 * consumed (header only). */
static long cam_cdm_util_dump_change_base_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;
	struct cdm_changebase_cmd *p_cbase_cmd;
	uint32_t *temp_ptr = cmd_buf_addr;

	p_cbase_cmd = (struct cdm_changebase_cmd *)temp_ptr;
	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE];

	CAM_INFO(CAM_CDM, "CHANGE_BASE: 0x%X",
		p_cbase_cmd->base);

	return ret;
}

/* Log a perf-control command; returns dwords consumed (header only). */
static long cam_cdm_util_dump_perf_ctrl_cmd(uint32_t *cmd_buf_addr)
{
	long ret = 0;

	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_PERF_CTRL];

	CAM_INFO(CAM_CDM, "PERF_CTRL");

	return ret;
}
+
/*
 * cam_cdm_util_dump_cmd_buf() - Log a decoded walk of a CDM command
 * buffer for debugging.
 * @cmd_buf_start: First dword of the buffer (inclusive).
 * @cmd_buf_end:   Last dword of the buffer (inclusive bound).
 *
 * Each per-command dump helper returns the number of dwords it consumed,
 * which advances the walk. Unknown opcodes are logged and skipped one
 * dword at a time. Note the do/while: at least one dword is decoded even
 * if start == end.
 */
void cam_cdm_util_dump_cmd_buf(
	uint32_t *cmd_buf_start, uint32_t *cmd_buf_end)
{
	uint32_t *buf_now = cmd_buf_start;
	uint32_t cmd = 0;

	if (!cmd_buf_start || !cmd_buf_end) {
		CAM_INFO(CAM_CDM, "Invalid args");
		return;
	}

	do {
		/* Opcode is the top byte of the command dword. */
		cmd = *buf_now;
		cmd = cmd >> CAM_CDM_COMMAND_OFFSET;

		switch (cmd) {
		case CAM_CDM_CMD_DMI:
		case CAM_CDM_CMD_DMI_32:
		case CAM_CDM_CMD_DMI_64:
			buf_now += cam_cdm_util_dump_dmi_cmd(buf_now);
			break;
		case CAM_CDM_CMD_REG_CONT:
			buf_now += cam_cdm_util_dump_reg_cont_cmd(buf_now);
			break;
		case CAM_CDM_CMD_REG_RANDOM:
			buf_now += cam_cdm_util_dump_reg_random_cmd(buf_now);
			break;
		case CAM_CDM_CMD_BUFF_INDIRECT:
			buf_now += cam_cdm_util_dump_buff_indirect(buf_now);
			break;
		case CAM_CDM_CMD_GEN_IRQ:
			buf_now += cam_cdm_util_dump_gen_irq_cmd(buf_now);
			break;
		case CAM_CDM_CMD_WAIT_EVENT:
			buf_now += cam_cdm_util_dump_wait_event_cmd(buf_now);
			break;
		case CAM_CDM_CMD_CHANGE_BASE:
			buf_now += cam_cdm_util_dump_change_base_cmd(buf_now);
			break;
		case CAM_CDM_CMD_PERF_CTRL:
			buf_now += cam_cdm_util_dump_perf_ctrl_cmd(buf_now);
			break;
		default:
			CAM_INFO(CAM_CDM, "Invalid CMD: 0x%x buf 0x%x",
				cmd, *buf_now);
			buf_now++;
			break;
		}
	} while (buf_now <= cmd_buf_end);
}
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_util.h b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_util.h
new file mode 100644
index 000000000000..8f2b48853ca8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_util.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_UTIL_H_
+#define _CAM_CDM_UTIL_H_
+
+enum cam_cdm_command {
+ CAM_CDM_CMD_UNUSED = 0x0,
+ CAM_CDM_CMD_DMI = 0x1,
+ CAM_CDM_CMD_NOT_DEFINED = 0x2,
+ CAM_CDM_CMD_REG_CONT = 0x3,
+ CAM_CDM_CMD_REG_RANDOM = 0x4,
+ CAM_CDM_CMD_BUFF_INDIRECT = 0x5,
+ CAM_CDM_CMD_GEN_IRQ = 0x6,
+ CAM_CDM_CMD_WAIT_EVENT = 0x7,
+ CAM_CDM_CMD_CHANGE_BASE = 0x8,
+ CAM_CDM_CMD_PERF_CTRL = 0x9,
+ CAM_CDM_CMD_DMI_32 = 0xa,
+ CAM_CDM_CMD_DMI_64 = 0xb,
+ CAM_CDM_CMD_PRIVATE_BASE = 0xc,
+ CAM_CDM_CMD_SWD_DMI_32 = (CAM_CDM_CMD_PRIVATE_BASE + 0x64),
+ CAM_CDM_CMD_SWD_DMI_64 = (CAM_CDM_CMD_PRIVATE_BASE + 0x65),
+ CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F
+};
+
+/**
+ * struct cam_cdm_utils_ops - Camera CDM util ops
+ *
+ * @cdm_get_cmd_header_size: Returns the size of the given command header
+ * in DWORDs.
+ * @command Command ID
+ * @return Size of the command in DWORDs
+ *
+ * @cdm_required_size_reg_continuous: Calculates the size of a reg-continuous
+ * command in dwords.
+ * @numVals Number of continuous values
+ * @return Size in dwords
+ *
+ * @cdm_required_size_reg_random: Calculates the size of a reg-random command
+ * in dwords.
+ * @numRegVals Number of register/value pairs
+ * @return Size in dwords
+ *
+ * @cdm_required_size_dmi: Calculates the size of a DMI command in dwords.
+ * @return Size in dwords
+ *
+ * @cdm_required_size_genirq: Calculates size of a Genirq command in dwords.
+ * @return Size in dwords
+ *
+ * @cdm_required_size_indirect: Calculates the size of an indirect command
+ * in dwords.
+ * @return Size in dwords
+ *
+ * @cdm_required_size_changebase: Calculates the size of a change-base command
+ * in dwords.
+ * @return Size in dwords
+ *
+ * @cdm_offsetof_dmi_addr: Returns the offset of address field in the DMI
+ * command header.
+ * @return Offset of addr field
+ *
+ * @cdm_offsetof_indirect_addr: Returns the offset of address field in the
+ * indirect command header.
+ * @return Offset of addr field
+ *
+ * @cdm_write_regcontinuous: Writes a command into the command buffer.
+ * @pCmdBuffer: Pointer to command buffer
+ * @reg: Beginning of the register address range where
+ * values will be written.
+ * @numVals: Number of values (registers) that will be written
+ * @pVals : An array of values that will be written
+ * @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_regrandom: Writes a command into the command buffer in
+ * register/value pairs.
+ * @pCmdBuffer: Pointer to command buffer
+ * @numRegVals: Number of register/value pairs that will be written
+ * @pRegVals: An array of register/value pairs that will be written
+ * The even indices are registers and the odd indices
+ * are values, e.g., {reg1, val1, reg2, val2, ...}.
+ * @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_dmi: Writes a DMI command into the command buffer.
+ * @pCmdBuffer: Pointer to command buffer
+ * @dmiCmd: DMI command
+ * @DMIAddr: Address of the DMI
+ * @DMISel: Selected bank that the DMI will write to
+ * @length: Size of data in bytes
+ * @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_indirect: Writes an indirect command into the command buffer.
+ * @pCmdBuffer: Pointer to command buffer
+ * @indirectBufferAddr: Device address of the indirect cmd buffer.
+ * @length: Size of data in bytes
+ * @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_changebase: Writes a changing CDM (address) base command into
+ * the command buffer.
+ * @pCmdBuffer: Pointer to command buffer
+ * @base: New base (device) address
+ * @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_genirq: Writes a gen irq command into the command buffer.
+ * @pCmdBuffer: Pointer to command buffer
+ * @userdata: userdata or cookie return by hardware during irq.
+ */
+struct cam_cdm_utils_ops {
+uint32_t (*cdm_get_cmd_header_size)(unsigned int command);
+uint32_t (*cdm_required_size_reg_continuous)(uint32_t numVals);
+uint32_t (*cdm_required_size_reg_random)(uint32_t numRegVals);
+uint32_t (*cdm_required_size_dmi)(void);
+uint32_t (*cdm_required_size_genirq)(void);
+uint32_t (*cdm_required_size_indirect)(void);
+uint32_t (*cdm_required_size_changebase)(void);
+uint32_t (*cdm_offsetof_dmi_addr)(void);
+uint32_t (*cdm_offsetof_indirect_addr)(void);
+uint32_t* (*cdm_write_regcontinuous)(
+ uint32_t *pCmdBuffer,
+ uint32_t reg,
+ uint32_t numVals,
+ uint32_t *pVals);
+uint32_t *(*cdm_write_regrandom)(
+ uint32_t *pCmdBuffer,
+ uint32_t numRegVals,
+ uint32_t *pRegVals);
+uint32_t *(*cdm_write_dmi)(
+ uint32_t *pCmdBuffer,
+ uint8_t dmiCmd,
+ uint32_t DMIAddr,
+ uint8_t DMISel,
+ uint32_t dmiBufferAddr,
+ uint32_t length);
+uint32_t *(*cdm_write_indirect)(
+ uint32_t *pCmdBuffer,
+ uint32_t indirectBufferAddr,
+ uint32_t length);
+uint32_t *(*cdm_write_changebase)(
+ uint32_t *pCmdBuffer,
+ uint32_t base);
+void (*cdm_write_genirq)(
+ uint32_t *pCmdBuffer,
+ uint32_t userdata);
+};
+
+/**
+ * cam_cdm_util_dump_cmd_buf()
+ *
+ * @brief: Util function to log cdm command buffers
+ *
+ * @cmd_buffer_start: Pointer to start of cmd buffer
+ * @cmd_buffer_end: Pointer to end of cmd buffer
+ *
+ */
+void cam_cdm_util_dump_cmd_buf(
+ uint32_t *cmd_buffer_start, uint32_t *cmd_buffer_end);
+
+
+
+#endif /* _CAM_CDM_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_virtual.h b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_virtual.h
new file mode 100644
index 000000000000..88ab1b842fce
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_virtual.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_VIRTUAL_H_
+#define _CAM_CDM_VIRTUAL_H_
+
+#include "cam_cdm_intf_api.h"
+
+int cam_virtual_cdm_probe(struct platform_device *pdev);
+int cam_virtual_cdm_remove(struct platform_device *pdev);
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+ uint32_t *cmd_buf, uint32_t cmd_buf_size,
+ struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+ uint32_t base_array_size, uint8_t bl_tag);
+
+#endif /* _CAM_CDM_VIRTUAL_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_virtual_core.c
new file mode 100644
index 000000000000..9021ecabb27c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_virtual_core.c
@@ -0,0 +1,383 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm_virtual.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_VIRTUAL_NAME "qcom,cam_virtual_cdm"
+
/*
 * Deferred IRQ handler for the virtual CDM. The payload's irq_status is
 * a bitmask: bit 1 (0x2) signals an inline/gen IRQ whose irq_data is the
 * BL tag to complete, bit 0 (0x1) signals reset-done. The payload is
 * freed here; BL nodes are removed from core->bl_request_list under
 * hw_mutex.
 */
static void cam_virtual_cdm_work(struct work_struct *work)
{
	struct cam_cdm_work_payload *payload;
	struct cam_hw_info *cdm_hw;
	struct cam_cdm *core;

	payload = container_of(work, struct cam_cdm_work_payload, work);
	if (payload) {
		cdm_hw = payload->hw;
		core = (struct cam_cdm *)cdm_hw->core_info;
		if (payload->irq_status & 0x2) {
			struct cam_cdm_bl_cb_request_entry *node;

			CAM_DBG(CAM_CDM, "CDM HW Gen/inline IRQ with data=%x",
				payload->irq_data);
			mutex_lock(&cdm_hw->hw_mutex);
			node = cam_cdm_find_request_by_bl_tag(
				payload->irq_data,
				&core->bl_request_list);
			if (node) {
				if (node->request_type ==
					CAM_HW_CDM_BL_CB_CLIENT) {
					/* Notify the submitting client. */
					cam_cdm_notify_clients(cdm_hw,
						CAM_CDM_CB_STATUS_BL_SUCCESS,
						(void *)node);
				} else if (node->request_type ==
					CAM_HW_CDM_BL_CB_INTERNAL) {
					/* Internal CBs are unexpected on the
					 * virtual CDM. */
					CAM_ERR(CAM_CDM, "Invalid node=%pK %d",
						node, node->request_type);
				}
				list_del_init(&node->entry);
				kfree(node);
			} else {
				CAM_ERR(CAM_CDM, "Invalid node for inline irq");
			}
			mutex_unlock(&cdm_hw->hw_mutex);
		}
		if (payload->irq_status & 0x1) {
			CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
			complete(&core->reset_complete);
		}
		kfree(payload);
	}

}
+
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+ struct cam_cdm_hw_intf_cmd_submit_bl *req,
+ struct cam_cdm_client *client)
+{
+ int i, rc = -EINVAL;
+ struct cam_cdm_bl_request *cdm_cmd = req->data;
+ struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+ mutex_lock(&client->lock);
+ for (i = 0; i < req->data->cmd_arrary_count ; i++) {
+ uintptr_t vaddr_ptr = 0;
+ size_t len = 0;
+
+ if ((!cdm_cmd->cmd[i].len) &&
+ (cdm_cmd->cmd[i].len > 0x100000)) {
+ CAM_ERR(CAM_CDM,
+ "len(%d) is invalid count=%d total cnt=%d",
+ cdm_cmd->cmd[i].len, i,
+ req->data->cmd_arrary_count);
+ rc = -EINVAL;
+ break;
+ }
+ if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+ rc = cam_mem_get_cpu_buf(
+ cdm_cmd->cmd[i].bl_addr.mem_handle, &vaddr_ptr,
+ &len);
+ } else if (req->data->type ==
+ CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) {
+ rc = 0;
+ vaddr_ptr = cdm_cmd->cmd[i].bl_addr.kernel_iova;
+ len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
+ } else {
+ CAM_ERR(CAM_CDM,
+ "Only mem hdl/Kernel va type is supported %d",
+ req->data->type);
+ rc = -EINVAL;
+ break;
+ }
+
+ if ((!rc) && (vaddr_ptr) && (len) &&
+ (len >= cdm_cmd->cmd[i].offset)) {
+ CAM_DBG(CAM_CDM,
+ "hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu",
+ cdm_cmd->cmd[i].bl_addr.mem_handle,
+ (void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
+ cdm_cmd->cmd[i].len, len);
+ rc = cam_cdm_util_cmd_buf_write(
+ &client->changebase_addr,
+ ((uint32_t *)vaddr_ptr +
+ ((cdm_cmd->cmd[i].offset)/4)),
+ cdm_cmd->cmd[i].len, client->data.base_array,
+ client->data.base_array_cnt, core->bl_tag);
+ if (rc) {
+ CAM_ERR(CAM_CDM,
+ "write failed for cnt=%d:%d len %u",
+ i, req->data->cmd_arrary_count,
+ cdm_cmd->cmd[i].len);
+ break;
+ }
+ } else {
+ CAM_ERR(CAM_CDM,
+ "Sanity check failed for hdl=%x len=%zu:%d",
+ cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+ cdm_cmd->cmd[i].offset);
+ CAM_ERR(CAM_CDM,
+ "Sanity check failed for cmd_count=%d cnt=%d",
+ i, req->data->cmd_arrary_count);
+ rc = -EINVAL;
+ break;
+ }
+ if (!rc) {
+ struct cam_cdm_work_payload *payload;
+
+ CAM_DBG(CAM_CDM,
+ "write BL success for cnt=%d with tag=%d",
+ i, core->bl_tag);
+ if ((true == req->data->flag) &&
+ (i == req->data->cmd_arrary_count)) {
+ struct cam_cdm_bl_cb_request_entry *node;
+
+ node = kzalloc(sizeof(
+ struct cam_cdm_bl_cb_request_entry),
+ GFP_KERNEL);
+ if (!node) {
+ rc = -ENOMEM;
+ break;
+ }
+ node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+ node->client_hdl = req->handle;
+ node->cookie = req->data->cookie;
+ node->bl_tag = core->bl_tag;
+ node->userdata = req->data->userdata;
+ mutex_lock(&cdm_hw->hw_mutex);
+ list_add_tail(&node->entry,
+ &core->bl_request_list);
+ mutex_unlock(&cdm_hw->hw_mutex);
+
+ payload = kzalloc(sizeof(
+ struct cam_cdm_work_payload),
+ GFP_ATOMIC);
+ if (payload) {
+ payload->irq_status = 0x2;
+ payload->irq_data = core->bl_tag;
+ payload->hw = cdm_hw;
+ INIT_WORK((struct work_struct *)
+ &payload->work,
+ cam_virtual_cdm_work);
+ queue_work(core->work_queue,
+ &payload->work);
+ }
+ }
+ core->bl_tag++;
+ CAM_DBG(CAM_CDM,
+ "Now commit the BL nothing for virtual");
+ if (!rc && (core->bl_tag == 63))
+ core->bl_tag = 0;
+ }
+ }
+ mutex_unlock(&client->lock);
+ return rc;
+}
+
+int cam_virtual_cdm_probe(struct platform_device *pdev)
+{
+ struct cam_hw_info *cdm_hw = NULL;
+ struct cam_hw_intf *cdm_hw_intf = NULL;
+ struct cam_cdm *cdm_core = NULL;
+ struct cam_cdm_private_dt_data *soc_private = NULL;
+ int rc;
+ struct cam_cpas_register_params cpas_parms;
+
+ cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+ if (!cdm_hw_intf)
+ return -ENOMEM;
+
+ cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!cdm_hw) {
+ kfree(cdm_hw_intf);
+ return -ENOMEM;
+ }
+
+ cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+ if (!cdm_hw->core_info) {
+ kfree(cdm_hw);
+ kfree(cdm_hw_intf);
+ return -ENOMEM;
+ }
+ cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+ cdm_hw->soc_info.pdev = pdev;
+ cdm_hw_intf->hw_type = CAM_VIRTUAL_CDM;
+ cdm_hw->soc_info.soc_private = kzalloc(
+ sizeof(struct cam_cdm_private_dt_data), GFP_KERNEL);
+ if (!cdm_hw->soc_info.soc_private) {
+ rc = -ENOMEM;
+ goto soc_load_failed;
+ }
+
+ rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
+ if (rc) {
+ CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
+ kfree(cdm_hw->soc_info.soc_private);
+ cdm_hw->soc_info.soc_private = NULL;
+ goto soc_load_failed;
+ }
+
+ cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+ soc_private = (struct cam_cdm_private_dt_data *)
+ cdm_hw->soc_info.soc_private;
+ if (soc_private->dt_cdm_shared == true)
+ cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+ else
+ cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+ cdm_core->bl_tag = 0;
+ INIT_LIST_HEAD(&cdm_core->bl_request_list);
+ init_completion(&cdm_core->reset_complete);
+ cdm_hw_intf->hw_priv = cdm_hw;
+ cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+ cdm_hw_intf->hw_ops.init = NULL;
+ cdm_hw_intf->hw_ops.deinit = NULL;
+ cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+ cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+ cdm_hw_intf->hw_ops.read = NULL;
+ cdm_hw_intf->hw_ops.write = NULL;
+ cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+
+ CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
+ cdm_hw_intf->hw_idx);
+
+ platform_set_drvdata(pdev, cdm_hw_intf);
+
+ cdm_hw->open_count = 0;
+ cdm_core->iommu_hdl.non_secure = -1;
+ cdm_core->iommu_hdl.secure = -1;
+ mutex_init(&cdm_hw->hw_mutex);
+ spin_lock_init(&cdm_hw->hw_lock);
+ init_completion(&cdm_hw->hw_complete);
+ mutex_lock(&cdm_hw->hw_mutex);
+ cdm_core->id = CAM_CDM_VIRTUAL;
+ memcpy(cdm_core->name, CAM_CDM_VIRTUAL_NAME,
+ sizeof(CAM_CDM_VIRTUAL_NAME));
+ cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+ CAM_CDM_INFLIGHT_WORKS);
+ cdm_core->ops = NULL;
+
+ cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+ cpas_parms.cell_index = cdm_hw->soc_info.index;
+ cpas_parms.dev = &pdev->dev;
+ cpas_parms.userdata = cdm_hw_intf;
+ strlcpy(cpas_parms.identifier, "cam-cdm-intf",
+ CAM_HW_IDENTIFIER_LENGTH);
+ rc = cam_cpas_register_client(&cpas_parms);
+ if (rc) {
+ CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
+ goto cpas_registration_failed;
+ }
+ CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
+ cpas_parms.client_handle);
+ cdm_core->cpas_handle = cpas_parms.client_handle;
+
+ CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
+
+ rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+ soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
+ if (rc) {
+ CAM_ERR(CAM_CDM, "Virtual CDM Interface registration failed");
+ goto intf_registration_failed;
+ }
+ CAM_DBG(CAM_CDM, "CDM%d registered to intf successful",
+ cdm_hw_intf->hw_idx);
+ mutex_unlock(&cdm_hw->hw_mutex);
+
+ return 0;
+intf_registration_failed:
+ cam_cpas_unregister_client(cdm_core->cpas_handle);
+cpas_registration_failed:
+ kfree(cdm_hw->soc_info.soc_private);
+ flush_workqueue(cdm_core->work_queue);
+ destroy_workqueue(cdm_core->work_queue);
+ mutex_unlock(&cdm_hw->hw_mutex);
+ mutex_destroy(&cdm_hw->hw_mutex);
+soc_load_failed:
+ kfree(cdm_hw->core_info);
+ kfree(cdm_hw);
+ kfree(cdm_hw_intf);
+ return rc;
+}
+
+int cam_virtual_cdm_remove(struct platform_device *pdev)
+{
+ struct cam_hw_info *cdm_hw = NULL;
+ struct cam_hw_intf *cdm_hw_intf = NULL;
+ struct cam_cdm *cdm_core = NULL;
+ int rc = -EBUSY;
+
+ cdm_hw_intf = platform_get_drvdata(pdev);
+ if (!cdm_hw_intf) {
+ CAM_ERR(CAM_CDM, "Failed to get dev private data");
+ return rc;
+ }
+
+ cdm_hw = cdm_hw_intf->hw_priv;
+ if (!cdm_hw) {
+ CAM_ERR(CAM_CDM,
+ "Failed to get virtual private data for type=%d idx=%d",
+ cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+ return rc;
+ }
+
+ cdm_core = cdm_hw->core_info;
+ if (!cdm_core) {
+ CAM_ERR(CAM_CDM,
+ "Failed to get virtual core data for type=%d idx=%d",
+ cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+ return rc;
+ }
+
+ rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+ if (rc) {
+ CAM_ERR(CAM_CDM, "CPAS unregister failed");
+ return rc;
+ }
+
+ rc = cam_cdm_intf_deregister_hw_cdm(cdm_hw_intf,
+ cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
+ cdm_core->index);
+ if (rc) {
+ CAM_ERR(CAM_CDM,
+ "Virtual CDM Interface de-registration failed");
+ return rc;
+ }
+
+ flush_workqueue(cdm_core->work_queue);
+ destroy_workqueue(cdm_core->work_queue);
+ mutex_destroy(&cdm_hw->hw_mutex);
+ kfree(cdm_hw->soc_info.soc_private);
+ kfree(cdm_hw->core_info);
+ kfree(cdm_hw);
+ kfree(cdm_hw_intf);
+ rc = 0;
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_hw_cdm170_reg.h b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_hw_cdm170_reg.h
new file mode 100644
index 000000000000..d0e7e25c565c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_hw_cdm170_reg.h
@@ -0,0 +1,142 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_CDM170_REG_H_
+#define _CAM_HW_CDM170_REG_H_
+
+#define CAM_CDM_REG_OFFSET_FIRST 0x0
+#define CAM_CDM_REG_OFFSET_LAST 0x200
+#define CAM_CDM_REGS_COUNT 0x30
+#define CAM_CDM_HWFIFO_SIZE 0x40
+
+#define CAM_CDM_OFFSET_HW_VERSION 0x0
+#define CAM_CDM_OFFSET_TITAN_VERSION 0x4
+#define CAM_CDM_OFFSET_RST_CMD 0x10
+#define CAM_CDM_OFFSET_CGC_CFG 0x14
+#define CAM_CDM_OFFSET_CORE_CFG 0x18
+#define CAM_CDM_OFFSET_CORE_EN 0x1c
+#define CAM_CDM_OFFSET_FE_CFG 0x20
+#define CAM_CDM_OFFSET_IRQ_MASK 0x30
+#define CAM_CDM_OFFSET_IRQ_CLEAR 0x34
+#define CAM_CDM_OFFSET_IRQ_CLEAR_CMD 0x38
+#define CAM_CDM_OFFSET_IRQ_SET 0x3c
+#define CAM_CDM_OFFSET_IRQ_SET_CMD 0x40
+
+#define CAM_CDM_OFFSET_IRQ_STATUS 0x44
+#define CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK 0x1
+#define CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK 0x2
+#define CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK 0x4
+#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000
+#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000
+#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000
+
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_REG 0x50
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_REG 0x54
+#define CAM_CDM_OFFSET_BL_FIFO_STORE_REG 0x58
+#define CAM_CDM_OFFSET_BL_FIFO_CFG 0x5c
+#define CAM_CDM_OFFSET_BL_FIFO_RB 0x60
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_RB 0x64
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_RB 0x68
+#define CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB 0x6c
+#define CAM_CDM_OFFSET_IRQ_USR_DATA 0x80
+#define CAM_CDM_OFFSET_WAIT_STATUS 0x84
+#define CAM_CDM_OFFSET_SCRATCH_0_REG 0x90
+#define CAM_CDM_OFFSET_SCRATCH_1_REG 0x94
+#define CAM_CDM_OFFSET_SCRATCH_2_REG 0x98
+#define CAM_CDM_OFFSET_SCRATCH_3_REG 0x9c
+#define CAM_CDM_OFFSET_SCRATCH_4_REG 0xa0
+#define CAM_CDM_OFFSET_SCRATCH_5_REG 0xa4
+#define CAM_CDM_OFFSET_SCRATCH_6_REG 0xa8
+#define CAM_CDM_OFFSET_SCRATCH_7_REG 0xac
+#define CAM_CDM_OFFSET_LAST_AHB_ADDR 0xd0
+#define CAM_CDM_OFFSET_LAST_AHB_DATA 0xd4
+#define CAM_CDM_OFFSET_CORE_DBUG 0xd8
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR 0xe0
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_DATA 0xe4
+#define CAM_CDM_OFFSET_CURRENT_BL_BASE 0xe8
+#define CAM_CDM_OFFSET_CURRENT_BL_LEN 0xec
+#define CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE 0xf0
+#define CAM_CDM_OFFSET_DEBUG_STATUS 0xf4
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_0 0x100
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_1 0x104
+#define CAM_CDM_OFFSET_BUS_MISR_RD_VAL 0x108
+#define CAM_CDM_OFFSET_PERF_MON_CTRL 0x110
+#define CAM_CDM_OFFSET_PERF_MON_0 0x114
+#define CAM_CDM_OFFSET_PERF_MON_1 0x118
+#define CAM_CDM_OFFSET_PERF_MON_2 0x11c
+#define CAM_CDM_OFFSET_SPARE 0x200
+
+/*
+ * Always make sure below register offsets are aligned with
+ * enum cam_cdm_regs offsets
+ */
+struct cam_cdm_reg_offset cam170_cpas_cdm_register_offsets[] = {
+ { CAM_CDM_OFFSET_HW_VERSION, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_TITAN_VERSION, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_RST_CMD, CAM_REG_ATTR_WRITE },
+ { CAM_CDM_OFFSET_CGC_CFG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_CORE_CFG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_CORE_EN, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_FE_CFG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_IRQ_MASK, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_IRQ_CLEAR, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_IRQ_CLEAR_CMD, CAM_REG_ATTR_WRITE },
+ { CAM_CDM_OFFSET_IRQ_SET, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_IRQ_SET_CMD, CAM_REG_ATTR_WRITE },
+ { CAM_CDM_OFFSET_IRQ_STATUS, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_IRQ_USR_DATA, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_BL_FIFO_BASE_REG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_BL_FIFO_LEN_REG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_BL_FIFO_STORE_REG, CAM_REG_ATTR_WRITE },
+ { CAM_CDM_OFFSET_BL_FIFO_CFG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_BL_FIFO_RB, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_BL_FIFO_BASE_RB, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_BL_FIFO_LEN_RB, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_WAIT_STATUS, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_SCRATCH_0_REG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_SCRATCH_1_REG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_SCRATCH_2_REG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_SCRATCH_3_REG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_SCRATCH_4_REG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_SCRATCH_5_REG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_SCRATCH_6_REG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_SCRATCH_7_REG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_LAST_AHB_ADDR, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_LAST_AHB_DATA, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_CORE_DBUG, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_LAST_AHB_ERR_DATA, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_CURRENT_BL_BASE, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_CURRENT_BL_LEN, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_DEBUG_STATUS, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_BUS_MISR_CFG_0, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_BUS_MISR_CFG_1, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_BUS_MISR_RD_VAL, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_PERF_MON_CTRL, CAM_REG_ATTR_READ_WRITE },
+ { CAM_CDM_OFFSET_PERF_MON_0, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_PERF_MON_1, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_PERF_MON_2, CAM_REG_ATTR_READ },
+ { CAM_CDM_OFFSET_SPARE, CAM_REG_ATTR_READ_WRITE }
+};
+
+struct cam_cdm_reg_offset_table cam170_cpas_cdm_offset_table = {
+ .first_offset = 0x0,
+ .last_offset = 0x200,
+ .reg_count = 0x30,
+ .offsets = cam170_cpas_cdm_register_offsets,
+ .offset_max_size = (sizeof(cam170_cpas_cdm_register_offsets)/
+ sizeof(struct cam_cdm_reg_offset)),
+};
+
+#endif /* _CAM_HW_CDM170_REG_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/Makefile b/drivers/media/platform/msm/camera_v3/cam_core/Makefile
new file mode 100644
index 000000000000..d26db272be0a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_context_utils.o cam_node.o cam_subdev.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_context.c b/drivers/media/platform/msm/camera_v3/cam_core/cam_context.c
new file mode 100644
index 000000000000..d4f9b72f99f3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_context.c
@@ -0,0 +1,591 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cam_context.h"
+#include "cam_debug_util.h"
+#include "cam_node.h"
+
+static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
+ void *evt_data)
+{
+ int rc = 0;
+ struct cam_context *ctx = (struct cam_context *)context;
+
+ if (!ctx || !ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (ctx->state_machine[ctx->state].irq_ops)
+ rc = ctx->state_machine[ctx->state].irq_ops(ctx, evt_id,
+ evt_data);
+ else
+ CAM_DBG(CAM_CORE,
+ "No function to handle event %d in dev %d, state %d",
+ evt_id, ctx->dev_hdl, ctx->state);
+ return rc;
+}
+
+int cam_context_shutdown(struct cam_context *ctx)
+{
+ int rc = 0;
+ struct cam_release_dev_cmd cmd;
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state > CAM_CTX_AVAILABLE && ctx->state < CAM_CTX_STATE_MAX) {
+ cmd.session_handle = ctx->session_hdl;
+ cmd.dev_handle = ctx->dev_hdl;
+ rc = cam_context_handle_release_dev(ctx, &cmd);
+ if (rc)
+ CAM_ERR(CAM_CORE,
+ "context release failed for dev_name %s",
+ ctx->dev_name);
+ else
+ cam_context_putref(ctx);
+ } else {
+ CAM_WARN(CAM_CORE,
+ "dev %s context id %u state %d invalid to release hdl",
+ ctx->dev_name, ctx->ctx_id, ctx->state);
+ rc = -EINVAL;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ rc = cam_destroy_device_hdl(ctx->dev_hdl);
+ if (rc)
+ CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s",
+ ctx->dev_name);
+ else
+ ctx->dev_hdl = -1;
+
+ return rc;
+}
+
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
+ struct cam_req_mgr_device_info *info)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!info) {
+ CAM_ERR(CAM_CORE, "Invalid get device info payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].crm_ops.get_dev_info) {
+ rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info(
+ ctx, info);
+ } else {
+ CAM_ERR(CAM_CORE, "No get device info in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_crm_link(struct cam_context *ctx,
+ struct cam_req_mgr_core_dev_link_setup *link)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!link) {
+ CAM_ERR(CAM_CORE, "Invalid link payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].crm_ops.link) {
+ rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link);
+ } else {
+ CAM_ERR(CAM_CORE, "No crm link in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
+ struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!unlink) {
+ CAM_ERR(CAM_CORE, "Invalid unlink payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].crm_ops.unlink) {
+ rc = ctx->state_machine[ctx->state].crm_ops.unlink(
+ ctx, unlink);
+ } else {
+ CAM_ERR(CAM_CORE, "No crm unlink in dev %d, name %s, state %d",
+ ctx->dev_hdl, ctx->dev_name, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
+ struct cam_req_mgr_apply_request *apply)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!apply) {
+ CAM_ERR(CAM_CORE, "Invalid apply request payload");
+ return -EINVAL;
+ }
+
+ if (ctx->state_machine[ctx->state].crm_ops.apply_req) {
+ rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx,
+ apply);
+ } else {
+ CAM_ERR(CAM_CORE, "No crm apply req in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ rc = -EPROTO;
+ }
+
+ return rc;
+}
+
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].crm_ops.flush_req) {
+ rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx,
+ flush);
+ } else {
+ CAM_ERR(CAM_CORE, "No crm flush req in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_crm_process_evt(struct cam_context *ctx,
+ struct cam_req_mgr_link_evt_data *process_evt)
+{
+ int rc = 0;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].crm_ops.process_evt) {
+ rc = ctx->state_machine[ctx->state].crm_ops.process_evt(ctx,
+ process_evt);
+ } else {
+ /* handling of this message is optional */
+ CAM_DBG(CAM_CORE, "No crm process evt in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
+ uint32_t buf_info)
+{
+ int rc = 0;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (ctx->state_machine[ctx->state].pagefault_ops) {
+ rc = ctx->state_machine[ctx->state].pagefault_ops(ctx, iova,
+ buf_info);
+ } else {
+ CAM_WARN(CAM_CORE, "No dump ctx in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ }
+
+ return rc;
+}
+
+int cam_context_handle_acquire_dev(struct cam_context *ctx,
+ struct cam_acquire_dev_cmd *cmd)
+{
+ int rc;
+ int i;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!cmd) {
+ CAM_ERR(CAM_CORE, "Invalid acquire device command payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].ioctl_ops.acquire_dev) {
+ rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev(
+ ctx, cmd);
+ } else {
+ CAM_ERR(CAM_CORE, "No acquire device in dev %d, state %d",
+ cmd->dev_handle, ctx->state);
+ rc = -EPROTO;
+ }
+
+ INIT_LIST_HEAD(&ctx->active_req_list);
+ INIT_LIST_HEAD(&ctx->wait_req_list);
+ INIT_LIST_HEAD(&ctx->pending_req_list);
+ INIT_LIST_HEAD(&ctx->free_req_list);
+
+ for (i = 0; i < ctx->req_size; i++) {
+ INIT_LIST_HEAD(&ctx->req_list[i].list);
+ list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
+ }
+
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_acquire_hw(struct cam_context *ctx,
+ void *args)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!args) {
+ CAM_ERR(CAM_CORE, "Invalid acquire device hw command payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].ioctl_ops.acquire_hw) {
+ rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_hw(
+ ctx, args);
+ } else {
+ CAM_ERR(CAM_CORE, "No acquire hw for dev %s, state %d",
+ ctx->dev_name, ctx->state);
+ rc = -EPROTO;
+ }
+
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_release_dev(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!cmd) {
+ CAM_ERR(CAM_CORE, "Invalid release device command payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].ioctl_ops.release_dev) {
+ rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev(
+ ctx, cmd);
+ } else {
+ CAM_ERR(CAM_CORE, "No release device in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_release_hw(struct cam_context *ctx,
+ void *args)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!args) {
+ CAM_ERR(CAM_CORE, "Invalid release HW command payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].ioctl_ops.release_hw) {
+ rc = ctx->state_machine[ctx->state].ioctl_ops.release_hw(
+ ctx, args);
+ } else {
+ CAM_ERR(CAM_CORE, "No release hw for dev %s, state %d",
+ ctx->dev_name, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_flush_dev(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!cmd) {
+ CAM_ERR(CAM_CORE, "Invalid flush device command payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].ioctl_ops.flush_dev) {
+ rc = ctx->state_machine[ctx->state].ioctl_ops.flush_dev(
+ ctx, cmd);
+ } else {
+ CAM_WARN(CAM_CORE, "No flush device in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_config_dev(struct cam_context *ctx,
+ struct cam_config_dev_cmd *cmd)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "context is not ready");
+ return -EINVAL;
+ }
+
+ if (!cmd) {
+ CAM_ERR(CAM_CORE, "Invalid config device command payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].ioctl_ops.config_dev) {
+ rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev(
+ ctx, cmd);
+ } else {
+ CAM_ERR(CAM_CORE, "No config device in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_start_dev(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ if (!ctx || !ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!cmd) {
+ CAM_ERR(CAM_CORE, "Invalid start device command payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].ioctl_ops.start_dev)
+ rc = ctx->state_machine[ctx->state].ioctl_ops.start_dev(
+ ctx, cmd);
+ else
+ /* start device can be optional for some driver */
+ CAM_DBG(CAM_CORE, "No start device in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_stop_dev(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ if (!ctx || !ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!cmd) {
+ CAM_ERR(CAM_CORE, "Invalid stop device command payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev)
+ rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev(
+ ctx, cmd);
+ else
+ /* stop device can be optional for some driver */
+ CAM_WARN(CAM_CORE, "No stop device in dev %d, name %s state %d",
+ ctx->dev_hdl, ctx->dev_name, ctx->state);
+
+ ctx->last_flush_req = 0;
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_init(struct cam_context *ctx,
+ const char *dev_name,
+ uint64_t dev_id,
+ uint32_t ctx_id,
+ struct cam_req_mgr_kmd_ops *crm_node_intf,
+ struct cam_hw_mgr_intf *hw_mgr_intf,
+ struct cam_ctx_request *req_list,
+ uint32_t req_size)
+{
+ int i;
+
+ /* crm_node_intf is optinal */
+ if (!ctx || !hw_mgr_intf || !req_list) {
+ CAM_ERR(CAM_CORE, "Invalid input parameters");
+ return -EINVAL;
+ }
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->dev_hdl = -1;
+ ctx->link_hdl = -1;
+ ctx->session_hdl = -1;
+ INIT_LIST_HEAD(&ctx->list);
+ mutex_init(&ctx->ctx_mutex);
+ mutex_init(&ctx->sync_mutex);
+ spin_lock_init(&ctx->lock);
+
+ ctx->dev_name = dev_name;
+ ctx->dev_id = dev_id;
+ ctx->ctx_id = ctx_id;
+ ctx->ctx_crm_intf = NULL;
+ ctx->crm_ctx_intf = crm_node_intf;
+ ctx->hw_mgr_intf = hw_mgr_intf;
+ ctx->irq_cb_intf = cam_context_handle_hw_event;
+
+ INIT_LIST_HEAD(&ctx->active_req_list);
+ INIT_LIST_HEAD(&ctx->wait_req_list);
+ INIT_LIST_HEAD(&ctx->pending_req_list);
+ INIT_LIST_HEAD(&ctx->free_req_list);
+ ctx->req_list = req_list;
+ ctx->req_size = req_size;
+ for (i = 0; i < req_size; i++) {
+ INIT_LIST_HEAD(&ctx->req_list[i].list);
+ list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
+ ctx->req_list[i].ctx = ctx;
+ }
+ ctx->state = CAM_CTX_AVAILABLE;
+ ctx->state_machine = NULL;
+ ctx->ctx_priv = NULL;
+
+ return 0;
+}
+
+int cam_context_deinit(struct cam_context *ctx)
+{
+ if (!ctx)
+ return -EINVAL;
+
+ /**
+ * This is called from platform device remove.
+ * Everyting should be released at this moment.
+ * so we just free the memory for the context
+ */
+ if (ctx->state != CAM_CTX_AVAILABLE)
+ CAM_ERR(CAM_CORE, "Device did not shutdown cleanly");
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ return 0;
+}
+
+void cam_context_putref(struct cam_context *ctx)
+{
+ kref_put(&ctx->refcount, cam_node_put_ctxt_to_free_list);
+ CAM_DBG(CAM_CORE,
+ "ctx device hdl %ld, atomic count %d, dev_name %s",
+ ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)),
+ ctx->dev_name);
+}
+
+void cam_context_getref(struct cam_context *ctx)
+{
+ if (kref_get_unless_zero(&ctx->refcount) == 0) {
+ /* should never happen */
+ WARN(1, "%s fail\n", __func__);
+ }
+ CAM_DBG(CAM_CORE,
+ "ctx device hdl %ld, atomic count %d, dev_name %s",
+ ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)),
+ ctx->dev_name);
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_context.h b/drivers/media/platform/msm/camera_v3/cam_core/cam_context.h
new file mode 100644
index 000000000000..cf07d95eb7f6
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_context.h
@@ -0,0 +1,467 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CONTEXT_H_
+#define _CAM_CONTEXT_H_
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include "cam_req_mgr_interface.h"
+#include "cam_hw_mgr_intf.h"
+
+/* Forward declarations */
+struct cam_context;
+
+/* max request number */
+#define CAM_CTX_REQ_MAX 20
+#define CAM_CTX_CFG_MAX 20
+#define CAM_CTX_RES_MAX 20
+
+/**
+ * enum cam_ctx_state - context top level states
+ *
+ */
+enum cam_context_state {
+ CAM_CTX_UNINIT = 0,
+ CAM_CTX_AVAILABLE = 1,
+ CAM_CTX_ACQUIRED = 2,
+ CAM_CTX_READY = 3,
+ CAM_CTX_ACTIVATED = 4,
+ CAM_CTX_STATE_MAX = 5,
+};
+
+/**
+ * struct cam_ctx_request - Common request structure for the context
+ *
+ * @list: Link list entry
+ * @status: Request status
+ * @request_id: Request id
+ * @req_priv: Derived request object
+ * @hw_update_entries: Hardware update entries
+ * @num_hw_update_entries: Number of hardware update entries
+ * @in_map_entries: Entries for in fences
+ * @num_in_map_entries: Number of in map entries
+ * @out_map_entries: Entries for out fences
+ * @num_out_map_entries: Number of out map entries
+ * @num_in_acked: Number of in fence acked
+ * @num_out_acked: Number of out fence acked
+ * @flushed: Request is flushed
+ * @ctx: The context to which this request belongs
+ * @pf_data page fault debug data
+ *
+ */
+struct cam_ctx_request {
+ struct list_head list;
+ uint32_t status;
+ uint64_t request_id;
+ void *req_priv;
+ struct cam_hw_update_entry hw_update_entries[CAM_CTX_CFG_MAX];
+ uint32_t num_hw_update_entries;
+ struct cam_hw_fence_map_entry in_map_entries[CAM_CTX_CFG_MAX];
+ uint32_t num_in_map_entries;
+ struct cam_hw_fence_map_entry out_map_entries[CAM_CTX_CFG_MAX];
+ uint32_t num_out_map_entries;
+ atomic_t num_in_acked;
+ uint32_t num_out_acked;
+ int flushed;
+ struct cam_context *ctx;
+ struct cam_hw_mgr_dump_pf_data pf_data;
+};
+
+/**
+ * struct cam_ctx_ioctl_ops - Function table for handling IOCTL calls
+ *
+ * @acquire_dev: Function pointer for acquire device
+ * @release_dev: Function pointer for release device
+ * @config_dev: Function pointer for config device
+ * @start_dev: Function pointer for start device
+ * @stop_dev: Function pointer for stop device
+ * @flush_dev: Function pointer for flush device
+ * @acquire_hw: Function pointer for acquire hw
+ * @release_hw: Function pointer for release hw
+ *
+ */
+struct cam_ctx_ioctl_ops {
+ int (*acquire_dev)(struct cam_context *ctx,
+ struct cam_acquire_dev_cmd *cmd);
+ int (*release_dev)(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd);
+ int (*config_dev)(struct cam_context *ctx,
+ struct cam_config_dev_cmd *cmd);
+ int (*start_dev)(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd);
+ int (*stop_dev)(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd);
+ int (*flush_dev)(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd);
+ int (*acquire_hw)(struct cam_context *ctx, void *args);
+ int (*release_hw)(struct cam_context *ctx, void *args);
+};
+
+/**
+ * struct cam_ctx_crm_ops - Function table for handling CRM to context calls
+ *
+ * @get_dev_info: Get device information
+ * @link: Link the context
+ * @unlink: Unlink the context
+ * @apply_req: Apply setting for the context
+ * @flush_req: Flush request to remove request ids
+ * @process_evt: Handle event notification from CRM.(optional)
+ *
+ */
+struct cam_ctx_crm_ops {
+ int (*get_dev_info)(struct cam_context *ctx,
+ struct cam_req_mgr_device_info *);
+ int (*link)(struct cam_context *ctx,
+ struct cam_req_mgr_core_dev_link_setup *link);
+ int (*unlink)(struct cam_context *ctx,
+ struct cam_req_mgr_core_dev_link_setup *unlink);
+ int (*apply_req)(struct cam_context *ctx,
+ struct cam_req_mgr_apply_request *apply);
+ int (*flush_req)(struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush);
+ int (*process_evt)(struct cam_context *ctx,
+ struct cam_req_mgr_link_evt_data *evt_data);
+};
+
+
+/**
+ * struct cam_ctx_ops - Collection of the interface function tables
+ *
+ * @ioctl_ops: Ioctl function table
+ * @crm_ops: CRM to context interface function table
+ * @irq_ops: Hardware event handle function
+ * @pagefault_ops: Function to be called on page fault
+ *
+ */
+struct cam_ctx_ops {
+ struct cam_ctx_ioctl_ops ioctl_ops;
+ struct cam_ctx_crm_ops crm_ops;
+ cam_hw_event_cb_func irq_ops;
+ cam_hw_pagefault_cb_func pagefault_ops;
+};
+
+/**
+ * struct cam_context - camera context object for the subdevice node
+ *
+ * @dev_name: String giving name of device associated
+ * @dev_id: ID of device associated
+ * @ctx_id: ID for this context
+ * @list: Link list entry
+ * @session_hdl: Session handle
+ * @dev_hdl: Device handle
+ * @link_hdl: Link handle
+ * @ctx_mutex: Mutex for ioctl calls
+ * @lock: Spin lock
+ * @active_req_list: Requests pending for done event
+ * @pending_req_list: Requests pending for reg upd event
+ * @wait_req_list: Requests waiting for apply
+ * @free_req_list: Requests that are free
+ * @req_list: Reference to the request storage
+ * @req_size: Size of the request storage
+ * @hw_mgr_intf: Context to HW interface
+ * @ctx_crm_intf: Context to CRM interface
+ * @crm_ctx_intf: CRM to context interface
+ * @irq_cb_intf: HW to context callback interface
+ * @state: Current state for top level state machine
+ * @state_machine: Top level state machine
+ * @ctx_priv: Private context pointer
+ * @ctxt_to_hw_map: Context to hardware mapping pointer
+ * @refcount: Context object refcount
+ * @node: The main node to which this context belongs
+ * @sync_mutex: mutex to sync with sync cb thread
+ * @last_flush_req: Last request to flush
+ *
+ */
+struct cam_context {
+ const char *dev_name;
+ uint64_t dev_id;
+ uint32_t ctx_id;
+ struct list_head list;
+ int32_t session_hdl;
+ int32_t dev_hdl;
+ int32_t link_hdl;
+
+ struct mutex ctx_mutex;
+ spinlock_t lock;
+
+ struct list_head active_req_list;
+ struct list_head pending_req_list;
+ struct list_head wait_req_list;
+ struct list_head free_req_list;
+ struct cam_ctx_request *req_list;
+ uint32_t req_size;
+
+ struct cam_hw_mgr_intf *hw_mgr_intf;
+ struct cam_req_mgr_crm_cb *ctx_crm_intf;
+ struct cam_req_mgr_kmd_ops *crm_ctx_intf;
+ cam_hw_event_cb_func irq_cb_intf;
+
+ enum cam_context_state state;
+ struct cam_ctx_ops *state_machine;
+
+ void *ctx_priv;
+ void *ctxt_to_hw_map;
+
+ struct kref refcount;
+ void *node;
+ struct mutex sync_mutex;
+ uint32_t last_flush_req;
+};
+
+/**
+ * cam_context_shutdown()
+ *
+ * @brief: Calls while device close or shutdown
+ *
+ * @ctx: Object pointer for cam_context
+ *
+ */
+int cam_context_shutdown(struct cam_context *ctx);
+
+/**
+ * cam_context_handle_crm_get_dev_info()
+ *
+ * @brief: Handle get device information command
+ *
+ * @ctx: Object pointer for cam_context
+ * @info: Device information returned
+ *
+ */
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
+ struct cam_req_mgr_device_info *info);
+
+/**
+ * cam_context_handle_crm_link()
+ *
+ * @brief: Handle link command
+ *
+ * @ctx: Object pointer for cam_context
+ * @link: Link command payload
+ *
+ */
+int cam_context_handle_crm_link(struct cam_context *ctx,
+ struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * cam_context_handle_crm_unlink()
+ *
+ * @brief: Handle unlink command
+ *
+ * @ctx: Object pointer for cam_context
+ * @unlink: Unlink command payload
+ *
+ */
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
+ struct cam_req_mgr_core_dev_link_setup *unlink);
+
+/**
+ * cam_context_handle_crm_apply_req()
+ *
+ * @brief: Handle apply request command
+ *
+ * @ctx: Object pointer for cam_context
+ * @apply: Apply request command payload
+ *
+ */
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
+ struct cam_req_mgr_apply_request *apply);
+
+/**
+ * cam_context_handle_crm_flush_req()
+ *
+ * @brief: Handle flush request command
+ *
+ * @ctx: Object pointer for cam_context
+ * @apply: Flush request command payload
+ *
+ */
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *apply);
+
+/**
+ * cam_context_handle_crm_process_evt()
+ *
+ * @brief: Handle process event command
+ *
+ * @ctx: Object pointer for cam_context
+ * @process_evt: process event command payload
+ *
+ */
+int cam_context_handle_crm_process_evt(struct cam_context *ctx,
+ struct cam_req_mgr_link_evt_data *process_evt);
+
+/**
+ * cam_context_dump_pf_info()
+ *
+ * @brief: Handle dump active request command
+ *
+ * @ctx: Object pointer for cam_context
+ * @iova: Page fault address
+ * @buf_info: Information about closest memory handle
+ *
+ */
+int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
+ uint32_t buf_info);
+
+/**
+ * cam_context_handle_acquire_dev()
+ *
+ * @brief: Handle acquire device command
+ *
+ * @ctx: Object pointer for cam_context
+ * @cmd: Acquire device command payload
+ *
+ */
+int cam_context_handle_acquire_dev(struct cam_context *ctx,
+ struct cam_acquire_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_acquire_hw()
+ *
+ * @brief: Handle acquire HW command
+ *
+ * @ctx: Object pointer for cam_context
+ * @cmd: Acquire HW command payload
+ *
+ */
+int cam_context_handle_acquire_hw(struct cam_context *ctx,
+ void *cmd);
+
+/**
+ * cam_context_handle_release_dev()
+ *
+ * @brief: Handle release device command
+ *
+ * @ctx: Object pointer for cam_context
+ * @cmd: Release device command payload
+ *
+ */
+int cam_context_handle_release_dev(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_release_hw()
+ *
+ * @brief: Handle release HW command
+ *
+ * @ctx: Object pointer for cam_context
+ * @cmd: Release HW command payload
+ *
+ */
+int cam_context_handle_release_hw(struct cam_context *ctx,
+ void *cmd);
+
+/**
+ * cam_context_handle_config_dev()
+ *
+ * @brief: Handle config device command
+ *
+ * @ctx: Object pointer for cam_context
+ * @cmd: Config device command payload
+ *
+ */
+int cam_context_handle_config_dev(struct cam_context *ctx,
+ struct cam_config_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_flush_dev()
+ *
+ * @brief: Handle flush device command
+ *
+ * @ctx: Object pointer for cam_context
+ * @cmd: Flush device command payload
+ *
+ */
+int cam_context_handle_flush_dev(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_start_dev()
+ *
+ * @brief: Handle start device command
+ *
+ * @ctx: Object pointer for cam_context
+ * @cmd: Start device command payload
+ *
+ */
+int cam_context_handle_start_dev(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_stop_dev()
+ *
+ * @brief: Handle stop device command
+ *
+ * @ctx: Object pointer for cam_context
+ * @cmd: Stop device command payload
+ *
+ */
+int cam_context_handle_stop_dev(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd);
+
+/**
+ * cam_context_deinit()
+ *
+ * @brief: Camera context deinitialize function
+ *
+ * @ctx: Object pointer for cam_context
+ *
+ */
+int cam_context_deinit(struct cam_context *ctx);
+
+/**
+ * cam_context_init()
+ *
+ * @brief: Camera context initialize function
+ *
+ * @ctx: Object pointer for cam_context
+ * @dev_name: String giving name of device associated
+ * @dev_id: ID of the device associated
+ * @ctx_id: ID for this context
+ * @crm_node_intf: Function table for crm to context interface
+ * @hw_mgr_intf: Function table for context to hw interface
+ * @req_list: Requests storage
+ * @req_size: Size of the request storage
+ *
+ */
+int cam_context_init(struct cam_context *ctx,
+ const char *dev_name,
+ uint64_t dev_id,
+ uint32_t ctx_id,
+ struct cam_req_mgr_kmd_ops *crm_node_intf,
+ struct cam_hw_mgr_intf *hw_mgr_intf,
+ struct cam_ctx_request *req_list,
+ uint32_t req_size);
+
+/**
+ * cam_context_putref()
+ *
+ * @brief: Put back context reference.
+ *
+ * @ctx: Context for which ref is returned
+ *
+ */
+void cam_context_putref(struct cam_context *ctx);
+
+/**
+ * cam_context_getref()
+ *
+ * @brief: Get back context reference.
+ *
+ * @ctx: Context for which ref is taken
+ *
+ */
+void cam_context_getref(struct cam_context *ctx);
+
+#endif /* _CAM_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera_v3/cam_core/cam_context_utils.c
new file mode 100644
index 000000000000..d98b900bc07a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_context_utils.c
@@ -0,0 +1,1013 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/cam_sync.h>
+#include <media/cam_defs.h>
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_mem_mgr.h"
+#include "cam_node.h"
+#include "cam_req_mgr_util.h"
+#include "cam_sync_api.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+
+static uint cam_debug_ctx_req_list;
+module_param(cam_debug_ctx_req_list, uint, 0644);
+
+/* Guard against being called from interrupt context: the helpers below
+ * take mutexes and therefore may sleep.  Returns 0 when running in a
+ * valid (process) context, -EINVAL after a WARN otherwise.
+ */
+static inline int cam_context_validate_thread(void)
+{
+	if (in_interrupt()) {
+		WARN(1, "Invalid execution context\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* HW-manager buf-done handler: complete the oldest active request.
+ *
+ * @done_event_data carries a struct cam_hw_done_event_data whose
+ * request_id must match the head of the active list (completion is
+ * assumed to be strictly in order).  Every out-fence of the request is
+ * signalled — success normally, error when @bubble_state is non-zero —
+ * and the request is returned to the free list.  Must not be called
+ * from interrupt context.
+ */
+int cam_context_buf_done_from_hw(struct cam_context *ctx,
+	void *done_event_data, uint32_t bubble_state)
+{
+	int j;
+	int result;
+	struct cam_ctx_request *req;
+	struct cam_hw_done_event_data *done =
+		(struct cam_hw_done_event_data *)done_event_data;
+	int rc;
+
+	if (!ctx || !done) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, done);
+		return -EINVAL;
+	}
+
+	rc = cam_context_validate_thread();
+	if (rc)
+		return rc;
+
+	spin_lock(&ctx->lock);
+	if (list_empty(&ctx->active_req_list)) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] no active request",
+			ctx->dev_name, ctx->ctx_id);
+		spin_unlock(&ctx->lock);
+		return -EIO;
+	}
+	/* only the head of the active list may complete */
+	req = list_first_entry(&ctx->active_req_list,
+		struct cam_ctx_request, list);
+
+	trace_cam_buf_done("UTILS", ctx, req);
+
+	if (done->request_id != req->request_id) {
+		CAM_ERR(CAM_CTXT,
+			"[%s][%d] mismatch: done req[%lld], active req[%lld]",
+			ctx->dev_name, ctx->ctx_id,
+			done->request_id, req->request_id);
+		spin_unlock(&ctx->lock);
+		return -EIO;
+	}
+
+	if (!req->num_out_map_entries) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] no output fence to signal",
+			ctx->dev_name, ctx->ctx_id);
+		spin_unlock(&ctx->lock);
+		return -EIO;
+	}
+
+	/*
+	 * since another thread may be adding/removing from active
+	 * list, so hold the lock
+	 */
+	list_del_init(&req->list);
+	spin_unlock(&ctx->lock);
+	/* NOTE(review): req fields are read below without ctx->lock;
+	 * this assumes a single buf-done consumer per context — confirm.
+	 */
+	if (!bubble_state) {
+		result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
+	} else {
+		CAM_DBG(CAM_REQ,
+			"[%s][ctx_id %d] : req[%llu] is done with error",
+			ctx->dev_name, ctx->ctx_id, req->request_id);
+
+		for (j = 0; j < req->num_out_map_entries; j++)
+			CAM_DBG(CAM_REQ, "fence %d signaled with error",
+				req->out_map_entries[j].sync_id);
+
+		result = CAM_SYNC_STATE_SIGNALED_ERROR;
+	}
+
+	/* signal all out-fences and invalidate their ids on the request */
+	for (j = 0; j < req->num_out_map_entries; j++) {
+		cam_sync_signal(req->out_map_entries[j].sync_id, result);
+		req->out_map_entries[j].sync_id = -1;
+	}
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving req[%llu] from active_list to free_list",
+			ctx->dev_name, ctx->ctx_id, req->request_id);
+
+	/*
+	 * another thread may be adding/removing from free list,
+	 * so hold the lock
+	 */
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	req->ctx = NULL;
+	spin_unlock(&ctx->lock);
+
+	return 0;
+}
+
+/* Move @req from the pending list to the active list and submit its
+ * prepared hw_update_entries to the HW manager via hw_config().
+ * On hw_config() failure the request is recycled onto the free list.
+ * @apply is accepted for interface symmetry but is not read here.
+ */
+static int cam_context_apply_req_to_hw(struct cam_ctx_request *req,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_context *ctx = req->ctx;
+	struct cam_hw_config_args cfg;
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	/* activate first so a fast buf-done can find the request */
+	spin_lock(&ctx->lock);
+	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
+	spin_unlock(&ctx->lock);
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving req[%llu] from pending_list to active_list",
+			ctx->dev_name, ctx->ctx_id, req->request_id);
+
+	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	cfg.request_id = req->request_id;
+	cfg.hw_update_entries = req->hw_update_entries;
+	cfg.num_hw_update_entries = req->num_hw_update_entries;
+	cfg.out_map_entries = req->out_map_entries;
+	cfg.num_out_map_entries = req->num_out_map_entries;
+	cfg.priv = req->req_priv;
+
+	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc) {
+		/* submission failed: pull it back off the active list */
+		spin_lock(&ctx->lock);
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+		spin_unlock(&ctx->lock);
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Moving req[%llu] from active_list to free_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+	}
+
+end:
+	return rc;
+}
+
+/* Sync-framework callback, invoked once per in-fence of a request.
+ * When the last in-fence is acked the request is submitted to HW
+ * (unless it was flushed in the meantime, in which case it is freed).
+ * The context reference taken when the callback was registered is
+ * dropped at the end.
+ *
+ * NOTE(review): the early-return error paths below skip
+ * cam_context_putref(), leaking the registration reference — confirm
+ * whether those paths are reachable in practice.
+ */
+static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
+{
+	struct cam_ctx_request *req = data;
+	struct cam_context *ctx = NULL;
+	struct cam_flush_dev_cmd flush_cmd;
+	struct cam_req_mgr_apply_request apply;
+	int rc;
+
+	if (!req) {
+		CAM_ERR(CAM_CTXT, "Invalid input param");
+		return;
+	}
+	rc = cam_context_validate_thread();
+	if (rc)
+		return;
+
+	ctx = req->ctx;
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid ctx for req %llu", req->request_id);
+		return;
+	}
+
+	if (atomic_inc_return(&req->num_in_acked) == req->num_in_map_entries) {
+		apply.request_id = req->request_id;
+		/*
+		 * take mutex to ensure that another thread does
+		 * not flush the request while this
+		 * thread is submitting it to h/w. The submit to
+		 * h/w and adding to the active list should happen
+		 * in a critical section which is provided by this
+		 * mutex.
+		 */
+		if (status == CAM_SYNC_STATE_SIGNALED_ERROR) {
+			CAM_DBG(CAM_CTXT, "fence error: %d", sync_obj);
+			/* only req_id is set: the flush helper reads
+			 * nothing else from the command
+			 */
+			flush_cmd.req_id = req->request_id;
+			cam_context_flush_req_to_hw(ctx, &flush_cmd);
+		}
+
+		mutex_lock(&ctx->sync_mutex);
+		if (!req->flushed) {
+			cam_context_apply_req_to_hw(req, &apply);
+			mutex_unlock(&ctx->sync_mutex);
+		} else {
+			/* flushed while fences were pending: recycle it */
+			req->flushed = 0;
+			req->ctx = NULL;
+			mutex_unlock(&ctx->sync_mutex);
+			spin_lock(&ctx->lock);
+			list_del_init(&req->list);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			spin_unlock(&ctx->lock);
+
+			if (cam_debug_ctx_req_list & ctx->dev_id)
+				CAM_INFO(CAM_CTXT,
+					"[%s][%d] : Moving req[%llu] from pending_list to free_list",
+					ctx->dev_name, ctx->ctx_id,
+					req->request_id);
+		}
+	}
+	cam_context_putref(ctx);
+}
+
+/* Release all HW resources held by @ctx via the HW manager and reset
+ * the session/device/link handles.  @cmd is accepted for interface
+ * symmetry but is not read.  The hw_release() return value is
+ * deliberately ignored — release is best-effort on this path.
+ */
+int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	struct cam_hw_release_args arg;
+
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid input param");
+		return -EINVAL;
+	}
+
+	if ((!ctx->hw_mgr_intf) || (!ctx->hw_mgr_intf->hw_release)) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		return -EINVAL;
+	}
+
+	arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	arg.active_req = false;
+
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+	ctx->ctxt_to_hw_map = NULL;
+
+	ctx->session_hdl = -1;
+	ctx->dev_hdl = -1;
+	ctx->link_hdl = -1;
+
+	return 0;
+}
+
+/* Map the user-supplied packet and forward it to the HW manager's
+ * stream-settings hook.  Returns 0 on success, -EINVAL for bad
+ * arguments or an unmappable packet handle, -EFAULT when the HW
+ * interface is missing or the config call fails.
+ */
+int32_t cam_context_config_dev_to_hw(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	size_t len;
+	struct cam_hw_stream_setttings cfg;
+	uintptr_t packet_addr;
+	struct cam_packet *packet;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		return -EINVAL;
+	}
+
+	/* check hw_mgr_intf itself before dereferencing it, consistent
+	 * with the other cam_context_*_to_hw() helpers
+	 */
+	if (!ctx->hw_mgr_intf ||
+		!ctx->hw_mgr_intf->hw_config_stream_settings) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		return rc;
+	}
+
+	rc = cam_context_validate_thread();
+	if (rc) {
+		CAM_ERR(CAM_CTXT,
+			"Not executing in the right context");
+		return rc;
+	}
+
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		&packet_addr, &len);
+	if (rc) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Can not get packet address",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	packet = (struct cam_packet *) ((uint8_t *)packet_addr +
+		(uint32_t)cmd->offset);
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	cfg.priv = NULL;
+
+	CAM_DBG(CAM_CTXT, "Processing config settings");
+	rc = ctx->hw_mgr_intf->hw_config_stream_settings(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc) {
+		CAM_ERR(CAM_CTXT,
+			"[%s][%d] Config failed stream settings",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+
+/* Build a cam_ctx_request from a user config packet.
+ *
+ * Pops a request object off the free list, maps the packet, rejects it
+ * when its request_id was already flushed, runs the HW manager's
+ * prepare-update hook, takes a reference on every out-fence, and — when
+ * the request has in-fences — parks it on the pending list with a sync
+ * callback registered per in-fence (each holding a context ref).
+ *
+ * NOTE(review): a request with no in-fences returns success while
+ * sitting on no list at all; verify the callers of this path.
+ */
+int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_ctx_request *req = NULL;
+	struct cam_hw_prepare_update_args cfg;
+	uintptr_t packet_addr;
+	struct cam_packet *packet;
+	size_t len = 0;
+	int32_t i = 0, j = 0;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+	rc = cam_context_validate_thread();
+	if (rc)
+		return rc;
+
+	/* grab a free request object, if any */
+	spin_lock(&ctx->lock);
+	if (!list_empty(&ctx->free_req_list)) {
+		req = list_first_entry(&ctx->free_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+	}
+	spin_unlock(&ctx->lock);
+
+	if (!req) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] No more request obj free",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	memset(req, 0, sizeof(*req));
+	INIT_LIST_HEAD(&req->list);
+	req->ctx = ctx;
+
+	/* for config dev, only memory handle is supported */
+	/* map packet from the memhandle */
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		&packet_addr, &len);
+	if (rc != 0) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Can not get packet address",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	packet = (struct cam_packet *) ((uint8_t *)packet_addr +
+		(uint32_t)cmd->offset);
+
+	/* drop packets belonging to a request id that was flushed */
+	if (packet->header.request_id <= ctx->last_flush_req) {
+		CAM_DBG(CAM_CORE,
+			"request %lld has been flushed, reject packet",
+			packet->header.request_id);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	if (packet->header.request_id > ctx->last_flush_req)
+		ctx->last_flush_req = 0;
+
+	/* preprocess the configuration */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	cfg.max_hw_update_entries = CAM_CTX_CFG_MAX;
+	cfg.num_hw_update_entries = req->num_hw_update_entries;
+	cfg.hw_update_entries = req->hw_update_entries;
+	cfg.max_out_map_entries = CAM_CTX_CFG_MAX;
+	cfg.out_map_entries = req->out_map_entries;
+	cfg.max_in_map_entries = CAM_CTX_CFG_MAX;
+	cfg.in_map_entries = req->in_map_entries;
+	cfg.pf_data = &(req->pf_data);
+
+	rc = ctx->hw_mgr_intf->hw_prepare_update(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc != 0) {
+		CAM_ERR(CAM_CTXT,
+			"[%s][%d] Prepare config packet failed in HW layer",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto free_req;
+	}
+	req->num_hw_update_entries = cfg.num_hw_update_entries;
+	req->num_out_map_entries = cfg.num_out_map_entries;
+	req->num_in_map_entries = cfg.num_in_map_entries;
+	atomic_set(&req->num_in_acked, 0);
+	req->request_id = packet->header.request_id;
+	req->status = 1;
+	req->req_priv = cfg.priv;
+
+	/* pin every out-fence for the lifetime of the request */
+	for (i = 0; i < req->num_out_map_entries; i++) {
+		rc = cam_sync_get_obj_ref(req->out_map_entries[i].sync_id);
+		if (rc) {
+			CAM_ERR(CAM_CTXT, "Can't get ref for sync %d",
+				req->out_map_entries[i].sync_id);
+			goto put_ref;
+		}
+	}
+
+	if (req->num_in_map_entries > 0) {
+		spin_lock(&ctx->lock);
+		list_add_tail(&req->list, &ctx->pending_req_list);
+		spin_unlock(&ctx->lock);
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Moving req[%llu] from free_list to pending_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+
+		/* one context ref per registered in-fence callback;
+		 * dropped again in cam_context_sync_callback()
+		 */
+		for (j = 0; j < req->num_in_map_entries; j++) {
+			cam_context_getref(ctx);
+			rc = cam_sync_register_callback(
+					cam_context_sync_callback,
+					(void *)req,
+					req->in_map_entries[j].sync_id);
+			if (rc) {
+				CAM_ERR(CAM_CTXT,
+					"[%s][%d] Failed register fence cb: %d ret = %d",
+					ctx->dev_name, ctx->ctx_id,
+					req->in_map_entries[j].sync_id, rc);
+				spin_lock(&ctx->lock);
+				list_del_init(&req->list);
+				spin_unlock(&ctx->lock);
+
+				if (cam_debug_ctx_req_list & ctx->dev_id)
+					CAM_INFO(CAM_CTXT,
+						"[%s][%d] : Moving req[%llu] from pending_list to free_list",
+						ctx->dev_name, ctx->ctx_id,
+						req->request_id);
+
+				cam_context_putref(ctx);
+
+				goto put_ref;
+			}
+			CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d",
+				req->in_map_entries[j].sync_id, rc);
+		}
+		goto end;
+	}
+
+	return rc;
+
+put_ref:
+	/* undo the out-fence refs taken above (i is one past the last
+	 * successful get)
+	 */
+	for (--i; i >= 0; i--) {
+		rc = cam_sync_put_obj_ref(req->out_map_entries[i].sync_id);
+		if (rc)
+			CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
+				req->out_map_entries[i].sync_id);
+	}
+free_req:
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	req->ctx = NULL;
+	spin_unlock(&ctx->lock);
+end:
+	return rc;
+}
+
+/* Acquire HW resources for @ctx and create its device handle.
+ *
+ * Validates the user command (resource count, handle type), reserves
+ * resources through the HW manager, then creates a CRM device handle.
+ * On handle-creation failure the just-acquired HW is released again.
+ */
+int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+	struct cam_hw_acquire_args param;
+	struct cam_create_dev_hdl req_hdl_param;
+	struct cam_hw_release_args release;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CAM_DBG(CAM_CTXT, "ses hdl: %x, num_res: %d, type: %d, res: %lld",
+		cmd->session_handle, cmd->num_resources, cmd->handle_type,
+		cmd->resource_hdl);
+
+	if (cmd->num_resources > CAM_CTX_RES_MAX) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] resource limit exceeded",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Only user pointer is supported",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* fill in parameters */
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = cmd->num_resources;
+	param.acquire_info = cmd->resource_hdl;
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Acquire device failed",
+			ctx->dev_name, ctx->ctx_id);
+		goto end;
+	}
+
+	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
+
+	/* if hw resource acquire successful, acquire dev handle */
+	req_hdl_param.session_hdl = cmd->session_handle;
+	/* bridge is not ready for these flags. so false for now */
+	req_hdl_param.v4l2_sub_dev_flag = 0;
+	req_hdl_param.media_entity_flag = 0;
+	req_hdl_param.priv = ctx;
+	req_hdl_param.ops = ctx->crm_ctx_intf;
+
+	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
+	if (ctx->dev_hdl <= 0) {
+		rc = -EFAULT;
+		CAM_ERR(CAM_CTXT, "[%s][%d] Can not create device handle",
+			ctx->dev_name, ctx->ctx_id);
+		goto free_hw;
+	}
+	cmd->dev_handle = ctx->dev_hdl;
+
+	/* store session information */
+	ctx->session_hdl = cmd->session_handle;
+
+	return rc;
+
+free_hw:
+	/* roll back the HW acquisition done above */
+	release.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
+	ctx->ctxt_to_hw_map = NULL;
+	ctx->dev_hdl = -1;
+end:
+	return rc;
+}
+
+/* Flush every pending and active request of @ctx.
+ *
+ * Pending requests: under sync_mutex (so the sync-callback thread
+ * cannot race a submission), each request's in-fence callbacks are
+ * deregistered and its out-fences are signalled with error.  Active
+ * requests: reported to the HW manager via hw_flush(), then their
+ * out-fences are signalled with error and the requests recycled.
+ *
+ * NOTE(review): flush_args.flush_req_pending/active are fixed-size
+ * arrays filled with an unbounded index — confirm their capacity
+ * covers ctx->req_size.
+ */
+int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
+{
+	struct cam_hw_flush_args flush_args;
+	struct list_head temp_list;
+	struct cam_ctx_request *req;
+	uint32_t i;
+	int rc = 0;
+	bool free_req;
+
+	CAM_DBG(CAM_CTXT, "[%s] E: NRT flush ctx", ctx->dev_name);
+	memset(&flush_args, 0, sizeof(flush_args));
+
+	/*
+	 * flush pending requests, take the sync lock to synchronize with the
+	 * sync callback thread so that the sync cb thread does not try to
+	 * submit request to h/w while the request is being flushed
+	 */
+	mutex_lock(&ctx->sync_mutex);
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->pending_req_list, &temp_list);
+	spin_unlock(&ctx->lock);
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving all pending requests from pending_list to temp_list",
+			ctx->dev_name, ctx->ctx_id);
+
+	flush_args.num_req_pending = 0;
+	while (true) {
+		/* temp_list is local, but req->list may still be touched
+		 * by other threads, so keep taking the lock
+		 */
+		spin_lock(&ctx->lock);
+		if (list_empty(&temp_list)) {
+			spin_unlock(&ctx->lock);
+			break;
+		}
+
+		req = list_first_entry(&temp_list,
+				struct cam_ctx_request, list);
+
+		list_del_init(&req->list);
+		spin_unlock(&ctx->lock);
+		/* tell a late sync callback not to submit this request */
+		req->flushed = 1;
+
+		flush_args.flush_req_pending[flush_args.num_req_pending++] =
+			req->req_priv;
+
+		/* deregister in-fence callbacks; each success drops the
+		 * context ref the registration held
+		 */
+		free_req = false;
+		for (i = 0; i < req->num_in_map_entries; i++) {
+			rc = cam_sync_deregister_callback(
+				cam_context_sync_callback,
+				(void *)req,
+				req->in_map_entries[i].sync_id);
+			if (!rc) {
+				cam_context_putref(ctx);
+				if (atomic_inc_return(&req->num_in_acked) ==
+					req->num_in_map_entries)
+					free_req = true;
+			}
+		}
+
+		for (i = 0; i < req->num_out_map_entries; i++) {
+			if (req->out_map_entries[i].sync_id != -1) {
+				rc = cam_sync_signal(
+					req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc == -EALREADY) {
+					CAM_ERR(CAM_CTXT,
+						"Req: %llu already signalled, sync_id:%d",
+						req->request_id,
+						req->out_map_entries[i].sync_id);
+					/* NOTE(review): break skips the
+					 * remaining out-fences of this
+					 * request — confirm intended
+					 */
+					break;
+				}
+			}
+		}
+
+		/*
+		 * If we have deregistered the last sync callback, req will
+		 * not be put on the free list. So put it on the free list here
+		 */
+		if (free_req) {
+			req->ctx = NULL;
+			spin_lock(&ctx->lock);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			spin_unlock(&ctx->lock);
+		}
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Deleting req[%llu] from temp_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+	}
+	mutex_unlock(&ctx->sync_mutex);
+
+	if (ctx->hw_mgr_intf->hw_flush) {
+		flush_args.num_req_active = 0;
+		spin_lock(&ctx->lock);
+		list_for_each_entry(req, &ctx->active_req_list, list) {
+			flush_args.flush_req_active[flush_args.num_req_active++]
+				= req->req_priv;
+		}
+		spin_unlock(&ctx->lock);
+
+		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+			flush_args.flush_type = CAM_FLUSH_TYPE_ALL;
+			ctx->hw_mgr_intf->hw_flush(
+				ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
+		}
+	}
+
+	/* now drain the active list the same way */
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->active_req_list, &temp_list);
+	INIT_LIST_HEAD(&ctx->active_req_list);
+	spin_unlock(&ctx->lock);
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving all requests from active_list to temp_list",
+			ctx->dev_name, ctx->ctx_id);
+
+	while (true) {
+		spin_lock(&ctx->lock);
+		if (list_empty(&temp_list)) {
+			spin_unlock(&ctx->lock);
+			break;
+		}
+		req = list_first_entry(&temp_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		spin_unlock(&ctx->lock);
+
+		for (i = 0; i < req->num_out_map_entries; i++) {
+			if (req->out_map_entries[i].sync_id != -1) {
+				rc = cam_sync_signal(
+					req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc == -EALREADY) {
+					CAM_ERR(CAM_CTXT,
+						"Req: %llu already signalled ctx: %pK dev_name: %s dev_handle: %d ctx_state: %d",
+						req->request_id, req->ctx,
+						req->ctx->dev_name,
+						req->ctx->dev_hdl,
+						req->ctx->state);
+					break;
+				}
+			}
+		}
+
+		spin_lock(&ctx->lock);
+		list_add_tail(&req->list, &ctx->free_req_list);
+		spin_unlock(&ctx->lock);
+		req->ctx = NULL;
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Moving req[%llu] from temp_list to free_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+	}
+
+	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush ctx", ctx->dev_name);
+
+	return 0;
+}
+
+int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+ struct cam_ctx_request *req = NULL;
+ struct cam_hw_flush_args flush_args;
+ uint32_t i;
+ int32_t sync_id = 0;
+ int rc = 0;
+ bool free_req = false;
+
+ CAM_DBG(CAM_CTXT, "[%s] E: NRT flush req", ctx->dev_name);
+
+ memset(&flush_args, 0, sizeof(flush_args));
+ flush_args.num_req_pending = 0;
+ flush_args.num_req_active = 0;
+ mutex_lock(&ctx->sync_mutex);
+ spin_lock(&ctx->lock);
+ list_for_each_entry(req, &ctx->pending_req_list, list) {
+ if (req->request_id != cmd->req_id)
+ continue;
+
+ if (cam_debug_ctx_req_list & ctx->dev_id)
+ CAM_INFO(CAM_CTXT,
+ "[%s][%d] : Deleting req[%llu] from pending_list",
+ ctx->dev_name, ctx->ctx_id, req->request_id);
+
+ list_del_init(&req->list);
+ req->flushed = 1;
+
+ flush_args.flush_req_pending[flush_args.num_req_pending++] =
+ req->req_priv;
+ break;
+ }
+ spin_unlock(&ctx->lock);
+ mutex_unlock(&ctx->sync_mutex);
+
+ if (ctx->hw_mgr_intf->hw_flush) {
+ if (!flush_args.num_req_pending) {
+ spin_lock(&ctx->lock);
+ list_for_each_entry(req, &ctx->active_req_list, list) {
+ if (req->request_id != cmd->req_id)
+ continue;
+
+ list_del_init(&req->list);
+
+ flush_args.flush_req_active[
+ flush_args.num_req_active++] =
+ req->req_priv;
+ break;
+ }
+ spin_unlock(&ctx->lock);
+ }
+
+ if (flush_args.num_req_pending || flush_args.num_req_active) {
+ flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+ flush_args.flush_type = CAM_FLUSH_TYPE_REQ;
+ ctx->hw_mgr_intf->hw_flush(
+ ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
+ }
+ }
+
+ if (req) {
+ if (flush_args.num_req_pending) {
+ for (i = 0; i < req->num_in_map_entries; i++) {
+ rc = cam_sync_deregister_callback(
+ cam_context_sync_callback,
+ (void *)req,
+ req->in_map_entries[i].sync_id);
+ if (rc)
+ continue;
+
+ cam_context_putref(ctx);
+ if (atomic_inc_return(&req->num_in_acked) ==
+ req->num_in_map_entries)
+ free_req = true;
+ }
+ }
+
+ if (flush_args.num_req_pending || flush_args.num_req_active) {
+ for (i = 0; i < req->num_out_map_entries; i++) {
+ sync_id =
+ req->out_map_entries[i].sync_id;
+ if (sync_id != -1) {
+ rc = cam_sync_signal(sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ if (rc == -EALREADY) {
+ CAM_ERR(CAM_CTXT,
+ "Req: %llu already signalled, sync_id:%d",
+ req->request_id, sync_id);
+ break;
+ }
+ }
+ }
+ if (flush_args.num_req_active || free_req) {
+ req->ctx = NULL;
+ spin_lock(&ctx->lock);
+ list_add_tail(&req->list, &ctx->free_req_list);
+ spin_unlock(&ctx->lock);
+
+ if (cam_debug_ctx_req_list & ctx->dev_id)
+ CAM_INFO(CAM_CTXT,
+ "[%s][%d] : Moving req[%llu] from %s to free_list",
+ ctx->dev_name, ctx->ctx_id,
+ req->request_id,
+ flush_args.num_req_active ?
+ "active_list" :
+ "pending_list");
+ }
+ }
+ }
+ CAM_DBG(CAM_CTXT, "[%s] X: NRT flush req", ctx->dev_name);
+
+ return 0;
+}
+
+int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+
+ int rc = 0;
+
+ if (!ctx || !cmd) {
+ CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!ctx->hw_mgr_intf) {
+ CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+ ctx->dev_name, ctx->ctx_id);
+ rc = -EFAULT;
+ goto end;
+ }
+
+ if (cmd->flush_type == CAM_FLUSH_TYPE_ALL) {
+ ctx->last_flush_req = cmd->req_id;
+ rc = cam_context_flush_ctx_to_hw(ctx);
+ } else if (cmd->flush_type == CAM_FLUSH_TYPE_REQ)
+ rc = cam_context_flush_req_to_hw(ctx, cmd);
+ else {
+ rc = -EINVAL;
+ CAM_ERR(CAM_CORE, "[%s][%d] Invalid flush type %d",
+ ctx->dev_name, ctx->ctx_id, cmd->flush_type);
+ }
+
+end:
+ return rc;
+}
+
+int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc = 0;
+ struct cam_hw_start_args arg;
+
+ if (!ctx || !cmd) {
+ CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!ctx->hw_mgr_intf) {
+ CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+ ctx->dev_name, ctx->ctx_id);
+ rc = -EFAULT;
+ goto end;
+ }
+
+ if ((cmd->session_handle != ctx->session_hdl) ||
+ (cmd->dev_handle != ctx->dev_hdl)) {
+ CAM_ERR(CAM_CTXT,
+ "[%s][%d] Invalid session hdl[%d], dev_handle[%d]",
+ ctx->dev_name, ctx->ctx_id,
+ cmd->session_handle, cmd->dev_handle);
+ rc = -EPERM;
+ goto end;
+ }
+
+ if (ctx->hw_mgr_intf->hw_start) {
+ arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+ rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+ &arg);
+ if (rc) {
+ /* HW failure. user need to clean up the resource */
+ CAM_ERR(CAM_CTXT, "[%s][%d] Start HW failed",
+ ctx->dev_name, ctx->ctx_id);
+ goto end;
+ }
+ }
+
+end:
+ return rc;
+}
+
+int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
+{
+ int rc = 0;
+ struct cam_hw_stop_args stop;
+
+ if (!ctx) {
+ CAM_ERR(CAM_CTXT, "Invalid input param");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!ctx->hw_mgr_intf) {
+ CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+ ctx->dev_name, ctx->ctx_id);
+ rc = -EFAULT;
+ goto end;
+ }
+
+ rc = cam_context_validate_thread();
+ if (rc)
+ goto end;
+
+ rc = cam_context_flush_ctx_to_hw(ctx);
+ if (rc)
+ goto end;
+
+ /* stop hw first */
+ if (ctx->hw_mgr_intf->hw_stop) {
+ stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+ ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+ &stop);
+ }
+
+end:
+ return rc;
+}
+
+int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
+ struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
+ bool *mem_found)
+{
+ int rc = 0;
+ struct cam_hw_cmd_args cmd_args;
+
+ if (!ctx) {
+ CAM_ERR(CAM_CTXT, "Invalid input params %pK ", ctx);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!ctx->hw_mgr_intf) {
+ CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+ ctx->dev_name, ctx->ctx_id);
+ rc = -EFAULT;
+ goto end;
+ }
+
+ if (ctx->hw_mgr_intf->hw_cmd) {
+ cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+ cmd_args.cmd_type = CAM_HW_MGR_CMD_DUMP_PF_INFO;
+ cmd_args.u.pf_args.pf_data.packet = packet;
+ cmd_args.u.pf_args.iova = iova;
+ cmd_args.u.pf_args.buf_info = buf_info;
+ cmd_args.u.pf_args.mem_found = mem_found;
+ ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+ &cmd_args);
+ }
+
+end:
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_context_utils.h b/drivers/media/platform/msm/camera_v3/cam_core/cam_context_utils.h
new file mode 100644
index 000000000000..e1809b2fa436
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_context_utils.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CONTEXT_UTILS_H_
+#define _CAM_CONTEXT_UTILS_H_
+
+#include <linux/types.h>
+
+int cam_context_buf_done_from_hw(struct cam_context *ctx,
+ void *done_event_data, uint32_t bubble_state);
+int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd);
+int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
+ struct cam_config_dev_cmd *cmd);
+int32_t cam_context_config_dev_to_hw(
+ struct cam_context *ctx, struct cam_config_dev_cmd *cmd);
+int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
+ struct cam_acquire_dev_cmd *cmd);
+int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd);
+int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx);
+int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd);
+int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx);
+int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd);
+int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
+ struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
+ bool *mem_found);
+
+#endif /* _CAM_CONTEXT_UTILS_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_core_defs.h b/drivers/media/platform/msm/camera_v3/cam_core/cam_core_defs.h
new file mode 100644
index 000000000000..32330b1e07e4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_core_defs.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CORE_DEFS_H_
+#define _CAM_CORE_DEFS_H_
+
+#define CAM_CORE_TRACE_ENABLE 0
+
+#if (CAM_CORE_TRACE_ENABLE == 1)
+ #define CAM_CORE_DBG(fmt, args...) do { \
+ trace_printk("%d: [cam_core_dbg] "fmt"\n", __LINE__, ##args); \
+ pr_debug("%s:%d "fmt"\n", __func__, __LINE__, ##args); \
+ } while (0)
+
+ #define CAM_CORE_WARN(fmt, args...) do { \
+ trace_printk("%d: [cam_core_warn] "fmt"\n", __LINE__, ##args); \
+ pr_warn("%s:%d "fmt"\n", __func__, __LINE__, ##args); \
+ } while (0)
+
+ #define CAM_CORE_ERR(fmt, args...) do { \
+ trace_printk("%d: [cam_core_err] "fmt"\n", __LINE__, ##args); \
+ pr_err("%s:%d "fmt"\n", __func__, __LINE__, ##args);\
+ } while (0)
+#else
+ #define CAM_CORE_DBG(fmt, args...) pr_debug("%s:%d "fmt"\n", \
+ __func__, __LINE__, ##args)
+
+ #define CAM_CORE_WARN(fmt, args...) pr_warn("%s:%d "fmt"\n", \
+ __func__, __LINE__, ##args)
+
+ #define CAM_CORE_ERR(fmt, args...) pr_err("%s:%d "fmt"\n", \
+ __func__, __LINE__, ##args)
+#endif
+
+#endif /* _CAM_CORE_DEFS_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_hw.h b/drivers/media/platform/msm/camera_v3/cam_core/cam_hw.h
new file mode 100644
index 000000000000..91e0f9d46245
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_hw.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_H_
+#define _CAM_HW_H_
+
+#include "cam_soc_util.h"
+
+/*
+ * This file declares Enums, Structures and APIs to be used as template
+ * when writing any HW driver in the camera subsystem.
+ */
+
+/* Hardware state enum */
+enum cam_hw_state {
+ CAM_HW_STATE_POWER_DOWN,
+ CAM_HW_STATE_POWER_UP,
+};
+
+/**
+ * struct cam_hw_info - Common hardware information
+ *
+ * @hw_mutex: Hardware mutex
+ * @hw_lock: Hardware spinlock
+ * @hw_complete: Hardware Completion
+ * @open_count: Count to track the HW enable from the client
+ * @hw_state: Hardware state
+ * @soc_info: Platform SOC properties for hardware
+ * @node_info: Private HW data related to nodes
+ * @core_info: Private HW data related to core logic
+ *
+ */
+struct cam_hw_info {
+ struct mutex hw_mutex;
+ spinlock_t hw_lock;
+ struct completion hw_complete;
+ uint32_t open_count;
+ enum cam_hw_state hw_state;
+ struct cam_hw_soc_info soc_info;
+ void *node_info;
+ void *core_info;
+};
+
+#endif /* _CAM_HW_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_core/cam_hw_intf.h
new file mode 100644
index 000000000000..c0fbdbec896a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_hw_intf.h
@@ -0,0 +1,83 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_INTF_H_
+#define _CAM_HW_INTF_H_
+
+#include <linux/types.h>
+
+/*
+ * This file declares Constants, Enums, Structures and APIs to be used as
+ * Interface between HW driver and HW Manager.
+ */
+
+/**
+ * struct cam_hw_ops - Hardware layer interface functions
+ *
+ * @get_hw_caps: Function pointer for get hw caps
+ * @init:                  Function pointer for initialize hardware
+ * @deinit: Function pointer for deinitialize hardware
+ * @reset: Function pointer for reset hardware
+ * @reserve: Function pointer for reserve hardware
+ * @release: Function pointer for release hardware
+ * @start: Function pointer for start hardware
+ * @stop: Function pointer for stop hardware
+ * @read: Function pointer for read hardware registers
+ * @write: Function pointer for Write hardware registers
+ * @process_cmd: Function pointer for additional hardware controls
+ * @flush_cmd: Function pointer for flush requests
+ *
+ */
+struct cam_hw_ops {
+ int (*get_hw_caps)(void *hw_priv,
+ void *get_hw_cap_args, uint32_t arg_size);
+ int (*init)(void *hw_priv,
+ void *init_hw_args, uint32_t arg_size);
+ int (*deinit)(void *hw_priv,
+ void *init_hw_args, uint32_t arg_size);
+ int (*reset)(void *hw_priv,
+ void *reset_core_args, uint32_t arg_size);
+ int (*reserve)(void *hw_priv,
+ void *reserve_args, uint32_t arg_size);
+ int (*release)(void *hw_priv,
+ void *release_args, uint32_t arg_size);
+ int (*start)(void *hw_priv,
+ void *start_args, uint32_t arg_size);
+ int (*stop)(void *hw_priv,
+ void *stop_args, uint32_t arg_size);
+ int (*read)(void *hw_priv,
+ void *read_args, uint32_t arg_size);
+ int (*write)(void *hw_priv,
+ void *write_args, uint32_t arg_size);
+ int (*process_cmd)(void *hw_priv,
+ uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+ int (*flush)(void *hw_priv,
+ void *flush_args, uint32_t arg_size);
+};
+
+/**
+ * struct cam_hw_intf - Common hardware node
+ *
+ * @hw_type: Hardware type
+ * @hw_idx: Hardware ID
+ * @hw_ops: Hardware interface function table
+ * @hw_priv: Private hardware node pointer
+ *
+ */
+struct cam_hw_intf {
+ uint32_t hw_type;
+ uint32_t hw_idx;
+ struct cam_hw_ops hw_ops;
+ void *hw_priv;
+};
+
+#endif /* _CAM_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera_v3/cam_core/cam_hw_mgr_intf.h
new file mode 100644
index 000000000000..fd30f115cc0c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_hw_mgr_intf.h
@@ -0,0 +1,334 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_MGR_INTF_H_
+#define _CAM_HW_MGR_INTF_H_
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+/*
+ * This file declares Constants, Enums, Structures and APIs to be used as
+ * Interface between HW Manager and Context.
+ */
+
+
+/* maximum context numbers */
+#define CAM_CTX_MAX 8
+
+/* maximum buf done irqs */
+#define CAM_NUM_OUT_PER_COMP_IRQ_MAX 12
+
+/* hardware event callback function type */
+typedef int (*cam_hw_event_cb_func)(void *context, uint32_t evt_id,
+ void *evt_data);
+
+/* hardware page fault callback function type */
+typedef int (*cam_hw_pagefault_cb_func)(void *context, unsigned long iova,
+ uint32_t buf_info);
+
+/**
+ * struct cam_hw_update_entry - Entry for hardware config
+ *
+ * @handle: Memory handle for the configuration
+ * @offset: Memory offset
+ * @len: Size of the configuration
+ * @flags: Flags for the config entry(eg. DMI)
+ * @addr: Address of hardware update entry
+ *
+ */
+struct cam_hw_update_entry {
+ int handle;
+ uint32_t offset;
+ uint32_t len;
+ uint32_t flags;
+ uintptr_t addr;
+};
+
+/**
+ * struct cam_hw_fence_map_entry - Entry for the resource to sync id map
+ *
+ * @resource_handle:       Resource port id for the buffer
+ * @sync_id: Sync id
+ *
+ */
+struct cam_hw_fence_map_entry {
+ uint32_t resource_handle;
+ int32_t sync_id;
+};
+
+/**
+ * struct cam_hw_done_event_data - Payload for hw done event
+ *
+ * @num_handles: number of handles in the event
+ * @resource_handle:       list of the resource handle
+ * @timestamp: time stamp
+ * @request_id: request identifier
+ *
+ */
+struct cam_hw_done_event_data {
+ uint32_t num_handles;
+ uint32_t resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+ struct timeval timestamp;
+ uint64_t request_id;
+};
+
+/**
+ * struct cam_hw_acquire_args - Payload for acquire command
+ *
+ * @context_data: Context data pointer for the callback function
+ * @event_cb: Callback function array
+ * @num_acq: Total number of acquire in the payload
+ * @acquire_info: Acquired resource array pointer
+ * @ctxt_to_hw_map: HW context (returned)
+ *
+ */
+struct cam_hw_acquire_args {
+ void *context_data;
+ cam_hw_event_cb_func event_cb;
+ uint32_t num_acq;
+ uint32_t acquire_info_size;
+ uintptr_t acquire_info;
+ void *ctxt_to_hw_map;
+};
+
+/**
+ * struct cam_hw_release_args - Payload for release command
+ *
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @active_req: Active request flag
+ *
+ */
+struct cam_hw_release_args {
+ void *ctxt_to_hw_map;
+ bool active_req;
+};
+
+/**
+ * struct cam_hw_start_args - Payload for start command
+ *
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @num_hw_update_entries: Number of Hardware configuration
+ * @hw_update_entries: Hardware configuration list
+ *
+ */
+struct cam_hw_start_args {
+ void *ctxt_to_hw_map;
+ uint32_t num_hw_update_entries;
+ struct cam_hw_update_entry *hw_update_entries;
+};
+
+/**
+ * struct cam_hw_stop_args - Payload for stop command
+ *
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @args: Arguments to pass for stop
+ *
+ */
+struct cam_hw_stop_args {
+ void *ctxt_to_hw_map;
+ void *args;
+};
+
+
+/**
+ * struct cam_hw_mgr_dump_pf_data - page fault debug data
+ *
+ * packet: pointer to packet
+ */
+struct cam_hw_mgr_dump_pf_data {
+ void *packet;
+};
+
+/**
+ * struct cam_hw_prepare_update_args - Payload for prepare command
+ *
+ * @packet: CSL packet from user mode driver
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @max_hw_update_entries: Maximum hardware update entries supported
+ * @hw_update_entries: Actual hardware update configuration (returned)
+ * @num_hw_update_entries: Number of actual hardware update entries (returned)
+ * @max_out_map_entries: Maximum output fence mapping supported
+ * @out_map_entries: Actual output fence mapping list (returned)
+ * @num_out_map_entries: Number of actual output fence mapping (returned)
+ * @max_in_map_entries: Maximum input fence mapping supported
+ * @in_map_entries: Actual input fence mapping list (returned)
+ * @num_in_map_entries:    Number of actual input fence mapping (returned)
+ * @priv: Private pointer of hw update
+ * @pf_data: Debug data for page fault
+ *
+ */
+struct cam_hw_prepare_update_args {
+ struct cam_packet *packet;
+ void *ctxt_to_hw_map;
+ uint32_t max_hw_update_entries;
+ struct cam_hw_update_entry *hw_update_entries;
+ uint32_t num_hw_update_entries;
+ uint32_t max_out_map_entries;
+ struct cam_hw_fence_map_entry *out_map_entries;
+ uint32_t num_out_map_entries;
+ uint32_t max_in_map_entries;
+ struct cam_hw_fence_map_entry *in_map_entries;
+ uint32_t num_in_map_entries;
+ void *priv;
+ struct cam_hw_mgr_dump_pf_data *pf_data;
+};
+
+/**
+ * struct cam_hw_stream_setttings - Payload for config stream command
+ *
+ * @packet: CSL packet from user mode driver
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @priv: Private pointer of hw update
+ *
+ */
+struct cam_hw_stream_setttings {
+ struct cam_packet *packet;
+ void *ctxt_to_hw_map;
+ void *priv;
+};
+
+/**
+ * struct cam_hw_config_args - Payload for config command
+ *
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @num_hw_update_entries: Number of hardware update entries
+ * @hw_update_entries: Hardware update list
+ * @out_map_entries: Out map info
+ * @num_out_map_entries: Number of out map entries
+ * @priv: Private pointer
+ * @request_id: Request ID
+ *
+ */
+struct cam_hw_config_args {
+ void *ctxt_to_hw_map;
+ uint32_t num_hw_update_entries;
+ struct cam_hw_update_entry *hw_update_entries;
+ struct cam_hw_fence_map_entry *out_map_entries;
+ uint32_t num_out_map_entries;
+ void *priv;
+ uint64_t request_id;
+ bool init_packet;
+};
+
+/**
+ * struct cam_hw_flush_args - Flush arguments
+ *
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @num_req_pending: Num request to flush, valid when flush type is REQ
+ * @flush_req_pending: Request pending pointers to flush
+ * @num_req_active: Num request to flush, valid when flush type is REQ
+ * @flush_req_active: Request active pointers to flush
+ * @flush_type: The flush type
+ *
+ */
+struct cam_hw_flush_args {
+ void *ctxt_to_hw_map;
+ uint32_t num_req_pending;
+ void *flush_req_pending[20];
+ uint32_t num_req_active;
+ void *flush_req_active[20];
+ enum flush_type_t flush_type;
+};
+
+/**
+ * struct cam_hw_dump_pf_args - Payload for dump pf info command
+ *
+ * @pf_data: Debug data for page fault
+ * @iova: Page fault address
+ * @buf_info: Info about memory buffer where page
+ * fault occurred
+ * @mem_found: If fault memory found in current
+ * request
+ *
+ */
+struct cam_hw_dump_pf_args {
+ struct cam_hw_mgr_dump_pf_data pf_data;
+ unsigned long iova;
+ uint32_t buf_info;
+ bool *mem_found;
+};
+
+/* enum cam_hw_mgr_command - Hardware manager command type */
+enum cam_hw_mgr_command {
+ CAM_HW_MGR_CMD_INTERNAL,
+ CAM_HW_MGR_CMD_DUMP_PF_INFO,
+};
+
+/**
+ * struct cam_hw_cmd_args - Payload for hw manager command
+ *
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @cmd_type HW command type
+ * @internal_args Arguments for internal command
+ * @pf_args Arguments for Dump PF info command
+ *
+ */
+struct cam_hw_cmd_args {
+ void *ctxt_to_hw_map;
+ uint32_t cmd_type;
+ union {
+ void *internal_args;
+ struct cam_hw_dump_pf_args pf_args;
+ } u;
+};
+
+/**
+ * cam_hw_mgr_intf - HW manager interface
+ *
+ * @hw_mgr_priv: HW manager object
+ * @hw_get_caps: Function pointer for get hw caps
+ * args = cam_query_cap_cmd
+ * @hw_acquire:            Function pointer for acquire hw resources
+ * args = cam_hw_acquire_args
+ * @hw_release: Function pointer for release hw device resource
+ * args = cam_hw_release_args
+ * @hw_start: Function pointer for start hw devices
+ * args = cam_hw_start_args
+ * @hw_stop: Function pointer for stop hw devices
+ * args = cam_hw_stop_args
+ * @hw_prepare_update: Function pointer for prepare hw update for hw
+ * devices args = cam_hw_prepare_update_args
+ * @hw_config_stream_settings: Function pointer for configure stream for hw
+ * devices args = cam_hw_stream_setttings
+ * @hw_config: Function pointer for configure hw devices
+ * args = cam_hw_config_args
+ * @hw_read: Function pointer for read hardware registers
+ * @hw_write: Function pointer for Write hardware registers
+ * @hw_cmd: Function pointer for any customized commands for
+ * the hardware manager
+ * @hw_open: Function pointer for HW init
+ * @hw_close: Function pointer for HW deinit
+ * @hw_flush: Function pointer for HW flush
+ *
+ */
+struct cam_hw_mgr_intf {
+ void *hw_mgr_priv;
+
+ int (*hw_get_caps)(void *hw_priv, void *hw_caps_args);
+ int (*hw_acquire)(void *hw_priv, void *hw_acquire_args);
+ int (*hw_release)(void *hw_priv, void *hw_release_args);
+ int (*hw_start)(void *hw_priv, void *hw_start_args);
+ int (*hw_stop)(void *hw_priv, void *hw_stop_args);
+ int (*hw_prepare_update)(void *hw_priv, void *hw_prepare_update_args);
+ int (*hw_config_stream_settings)(void *hw_priv,
+ void *hw_stream_settings);
+ int (*hw_config)(void *hw_priv, void *hw_config_args);
+ int (*hw_read)(void *hw_priv, void *read_args);
+ int (*hw_write)(void *hw_priv, void *write_args);
+ int (*hw_cmd)(void *hw_priv, void *write_args);
+ int (*hw_open)(void *hw_priv, void *fw_download_args);
+ int (*hw_close)(void *hw_priv, void *hw_close_args);
+ int (*hw_flush)(void *hw_priv, void *hw_flush_args);
+};
+
+#endif /* _CAM_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_node.c b/drivers/media/platform/msm/camera_v3/cam_core/cam_node.c
new file mode 100644
index 000000000000..9da340b1e766
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_node.c
@@ -0,0 +1,790 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+
+#include "cam_node.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+
+static void cam_node_print_ctx_state(
+ struct cam_node *node)
+{
+ int i;
+ struct cam_context *ctx;
+
+ CAM_INFO(CAM_CORE, "[%s] state=%d, ctx_size %d",
+ node->name, node->state, node->ctx_size);
+
+ mutex_lock(&node->list_mutex);
+ for (i = 0; i < node->ctx_size; i++) {
+ ctx = &node->ctx_list[i];
+
+ spin_lock_bh(&ctx->lock);
+ CAM_INFO(CAM_CORE,
+ "[%s][%d] : state=%d, refcount=%d, active_req_list=%d, pending_req_list=%d, wait_req_list=%d, free_req_list=%d",
+ ctx->dev_name ? ctx->dev_name : "null",
+ i, ctx->state,
+ atomic_read(&(ctx->refcount.refcount)),
+ list_empty(&ctx->active_req_list),
+ list_empty(&ctx->pending_req_list),
+ list_empty(&ctx->wait_req_list),
+ list_empty(&ctx->free_req_list));
+ spin_unlock_bh(&ctx->lock);
+ }
+ mutex_unlock(&node->list_mutex);
+}
+
+static struct cam_context *cam_node_get_ctxt_from_free_list(
+ struct cam_node *node)
+{
+ struct cam_context *ctx = NULL;
+
+ mutex_lock(&node->list_mutex);
+ if (!list_empty(&node->free_ctx_list)) {
+ ctx = list_first_entry(&node->free_ctx_list,
+ struct cam_context, list);
+ list_del_init(&ctx->list);
+ }
+ mutex_unlock(&node->list_mutex);
+ if (ctx)
+ kref_init(&ctx->refcount);
+ return ctx;
+}
+
+void cam_node_put_ctxt_to_free_list(struct kref *ref)
+{
+ struct cam_context *ctx =
+ container_of(ref, struct cam_context, refcount);
+ struct cam_node *node = ctx->node;
+
+ mutex_lock(&node->list_mutex);
+ list_add_tail(&ctx->list, &node->free_ctx_list);
+ mutex_unlock(&node->list_mutex);
+}
+
+static int __cam_node_handle_query_cap(struct cam_node *node,
+ struct cam_query_cap_cmd *query)
+{
+ int rc = -EFAULT;
+
+ if (!query) {
+ CAM_ERR(CAM_CORE, "Invalid params");
+ return -EINVAL;
+ }
+
+ if (node->hw_mgr_intf.hw_get_caps) {
+ rc = node->hw_mgr_intf.hw_get_caps(
+ node->hw_mgr_intf.hw_mgr_priv, query);
+ }
+
+ return rc;
+}
+
+static int __cam_node_handle_acquire_dev(struct cam_node *node,
+ struct cam_acquire_dev_cmd *acquire)
+{
+ int rc = 0;
+ struct cam_context *ctx = NULL;
+
+ if (!acquire)
+ return -EINVAL;
+
+ ctx = cam_node_get_ctxt_from_free_list(node);
+ if (!ctx) {
+ CAM_ERR(CAM_CORE, "No free ctx in free list node %s",
+ node->name);
+ cam_node_print_ctx_state(node);
+
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = cam_context_handle_acquire_dev(ctx, acquire);
+ if (rc) {
+ CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
+ node->name);
+ goto free_ctx;
+ }
+
+ CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d",
+ node->name, ctx->ctx_id);
+
+ return 0;
+free_ctx:
+ cam_context_putref(ctx);
+err:
+ return rc;
+}
+
+static int __cam_node_handle_acquire_hw_v1(struct cam_node *node,
+ struct cam_acquire_hw_cmd_v1 *acquire)
+{
+ int rc = 0;
+ struct cam_context *ctx = NULL;
+
+ if (!acquire)
+ return -EINVAL;
+
+ if (acquire->dev_handle <= 0) {
+ CAM_ERR(CAM_CORE, "Invalid device handle for context");
+ return -EINVAL;
+ }
+
+ if (acquire->session_handle <= 0) {
+ CAM_ERR(CAM_CORE, "Invalid session handle for context");
+ return -EINVAL;
+ }
+
+ ctx = (struct cam_context *)cam_get_device_priv(acquire->dev_handle);
+ if (!ctx) {
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ acquire->dev_handle);
+ return -EINVAL;
+ }
+
+ rc = cam_context_handle_acquire_hw(ctx, acquire);
+ if (rc) {
+ CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
+ node->name);
+ return rc;
+ }
+
+ CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d",
+ node->name, ctx->ctx_id);
+
+ return 0;
+}
+
+static int __cam_node_handle_start_dev(struct cam_node *node,
+ struct cam_start_stop_dev_cmd *start)
+{
+ struct cam_context *ctx = NULL;
+ int rc;
+
+ if (!start)
+ return -EINVAL;
+
+ if (start->dev_handle <= 0) {
+ CAM_ERR(CAM_CORE, "Invalid device handle for context");
+ return -EINVAL;
+ }
+
+ if (start->session_handle <= 0) {
+ CAM_ERR(CAM_CORE, "Invalid session handle for context");
+ return -EINVAL;
+ }
+
+ ctx = (struct cam_context *)cam_get_device_priv(start->dev_handle);
+ if (!ctx) {
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ start->dev_handle);
+ return -EINVAL;
+ }
+
+ rc = cam_context_handle_start_dev(ctx, start);
+ if (rc)
+ CAM_ERR(CAM_CORE, "Start failure for node %s", node->name);
+
+ return rc;
+}
+
+static int __cam_node_handle_stop_dev(struct cam_node *node,
+ struct cam_start_stop_dev_cmd *stop)
+{
+ struct cam_context *ctx = NULL;
+ int rc;
+
+ if (!stop)
+ return -EINVAL;
+
+ if (stop->dev_handle <= 0) {
+ CAM_ERR(CAM_CORE, "Invalid device handle for context");
+ return -EINVAL;
+ }
+
+ if (stop->session_handle <= 0) {
+ CAM_ERR(CAM_CORE, "Invalid session handle for context");
+ return -EINVAL;
+ }
+
+ ctx = (struct cam_context *)cam_get_device_priv(stop->dev_handle);
+ if (!ctx) {
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ stop->dev_handle);
+ return -EINVAL;
+ }
+
+ rc = cam_context_handle_stop_dev(ctx, stop);
+ if (rc)
+ CAM_ERR(CAM_CORE, "Stop failure for node %s", node->name);
+
+ return rc;
+}
+
+/**
+ * __cam_node_handle_config_dev() - handle a CAM_CONFIG_DEV request.
+ * @node:   parent node; used only for the name in error logs
+ * @config: copied-in userspace payload with dev/session handles
+ *
+ * Validates both handles, resolves the context and forwards the
+ * configuration packet to the context layer.
+ * Returns 0 on success, -EINVAL on bad input, or the context rc.
+ */
+static int __cam_node_handle_config_dev(struct cam_node *node,
+	struct cam_config_dev_cmd *config)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!config)
+		return -EINVAL;
+
+	if (config->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (config->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(config->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			config->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_config_dev(ctx, config);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Config failure for node %s", node->name);
+
+	return rc;
+}
+
+/**
+ * __cam_node_handle_flush_dev() - handle a CAM_FLUSH_REQ request.
+ * @node:  parent node; used only for the name in error logs
+ * @flush: copied-in userspace payload with dev/session handles
+ *
+ * Validates both handles, resolves the context and forwards the
+ * flush request to the context layer.
+ * Returns 0 on success, -EINVAL on bad input, or the context rc.
+ */
+static int __cam_node_handle_flush_dev(struct cam_node *node,
+	struct cam_flush_dev_cmd *flush)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!flush)
+		return -EINVAL;
+
+	if (flush->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (flush->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(flush->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			flush->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_flush_dev(ctx, flush);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
+
+	return rc;
+}
+
+/**
+ * __cam_node_handle_release_dev() - handle a CAM_RELEASE_DEV request.
+ * @node:    parent node; used for log messages
+ * @release: copied-in userspace payload with dev/session handles
+ *
+ * Releases the context bound to @release->dev_handle (when its state
+ * permits), destroys the device handle, and finally drops the context
+ * reference.
+ *
+ * Fix vs. original: cam_context_putref() was invoked *before* the last
+ * dereferences of @ctx (the ctx->dev_hdl write and the debug read of
+ * ctx->ctx_id/refcount).  If that put released the final reference this
+ * was a use-after-free.  The reference is now dropped only after the
+ * last access to @ctx; the invalid-state path still skips the put, as
+ * before.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int __cam_node_handle_release_dev(struct cam_node *node,
+	struct cam_release_dev_cmd *release)
+{
+	int rc = 0;
+	bool put_ref = false;
+	struct cam_context *ctx = NULL;
+
+	if (!release)
+		return -EINVAL;
+
+	if (release->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (release->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s",
+			release->dev_handle, node->name);
+		return -EINVAL;
+	}
+
+	if (ctx->state > CAM_CTX_UNINIT && ctx->state < CAM_CTX_STATE_MAX) {
+		rc = cam_context_handle_release_dev(ctx, release);
+		if (rc)
+			CAM_ERR(CAM_CORE, "context release failed for node %s",
+				node->name);
+		/* Only a state-valid release drops the reference. */
+		put_ref = true;
+	} else {
+		CAM_WARN(CAM_CORE,
+			"node %s context id %u state %d invalid to release hdl",
+			node->name, ctx->ctx_id, ctx->state);
+	}
+
+	rc = cam_destroy_device_hdl(release->dev_handle);
+	if (rc)
+		CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s",
+			node->name);
+	else
+		ctx->dev_hdl = -1;
+
+	CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d",
+		node->name, ctx->ctx_id,
+		atomic_read(&(ctx->refcount.refcount)));
+
+	/* Drop the reference last: it may free @ctx. */
+	if (put_ref)
+		cam_context_putref(ctx);
+
+	return rc;
+}
+
+/**
+ * __cam_node_handle_release_hw_v1() - handle CAM_RELEASE_HW (API v1).
+ * @node:    parent node; used for log messages
+ * @release: copied-in v1 release-HW payload
+ *
+ * Validates both handles, resolves the context and forwards the
+ * HW-release to the context layer.  Unlike release_dev, this does not
+ * destroy the device handle or drop a context reference.
+ * Returns 0 on success, -EINVAL on bad input, or the context rc.
+ */
+static int __cam_node_handle_release_hw_v1(struct cam_node *node,
+	struct cam_release_hw_cmd_v1 *release)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!release)
+		return -EINVAL;
+
+	if (release->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (release->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s",
+			release->dev_handle, node->name);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_release_hw(ctx, release);
+	if (rc)
+		CAM_ERR(CAM_CORE, "context release failed node %s", node->name);
+
+	CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d",
+		node->name, ctx->ctx_id,
+		atomic_read(&(ctx->refcount.refcount)));
+
+	return rc;
+}
+
+/**
+ * __cam_node_crm_get_dev_info() - CRM callback to query device info.
+ * @info: request-manager device-info descriptor; @info->dev_hdl selects
+ *        the context, the rest is filled by the context layer
+ *
+ * Returns 0 on success or -EINVAL on a NULL payload / stale handle.
+ */
+static int __cam_node_crm_get_dev_info(struct cam_req_mgr_device_info *info)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!info)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(info->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			info->dev_hdl);
+		return -EINVAL;
+	}
+	return cam_context_handle_crm_get_dev_info(ctx, info);
+}
+
+/**
+ * __cam_node_crm_link_setup() - CRM callback to link/unlink a context.
+ * @setup: link-setup descriptor; @setup->link_enable selects whether
+ *         the context is linked to or unlinked from the CRM session
+ *
+ * Returns 0 on success, -EINVAL on bad input, or the context rc.
+ */
+static int __cam_node_crm_link_setup(
+	struct cam_req_mgr_core_dev_link_setup *setup)
+{
+	int rc;
+	struct cam_context *ctx = NULL;
+
+	if (!setup)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(setup->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			setup->dev_hdl);
+		return -EINVAL;
+	}
+
+	if (setup->link_enable)
+		rc = cam_context_handle_crm_link(ctx, setup);
+	else
+		rc = cam_context_handle_crm_unlink(ctx, setup);
+
+	return rc;
+}
+
+/**
+ * __cam_node_crm_apply_req() - CRM callback to apply request settings.
+ * @apply: apply descriptor carrying dev_hdl and request_id
+ *
+ * Emits a trace event for the request before forwarding it, so apply
+ * latencies can be correlated per request_id.
+ * Returns 0 on success or -EINVAL on a NULL payload / stale handle.
+ */
+static int __cam_node_crm_apply_req(struct cam_req_mgr_apply_request *apply)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!apply)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			apply->dev_hdl);
+		return -EINVAL;
+	}
+
+	trace_cam_apply_req("Node", apply->request_id);
+
+	return cam_context_handle_crm_apply_req(ctx, apply);
+}
+
+/**
+ * __cam_node_crm_flush_req() - CRM callback to flush pending requests.
+ * @flush: flush descriptor carrying dev_hdl and flush parameters
+ *
+ * Returns 0 on success or -EINVAL on a NULL payload / stale handle.
+ */
+static int __cam_node_crm_flush_req(struct cam_req_mgr_flush_request *flush)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!flush) {
+		CAM_ERR(CAM_CORE, "Invalid flush request payload");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *) cam_get_device_priv(flush->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			flush->dev_hdl);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_crm_flush_req(ctx, flush);
+}
+
+/**
+ * __cam_node_crm_process_evt() - CRM callback to deliver a link event.
+ * @evt_data: link event descriptor carrying dev_hdl and event payload
+ *
+ * Returns 0 on success or -EINVAL on a NULL payload / stale handle.
+ */
+static int __cam_node_crm_process_evt(
+	struct cam_req_mgr_link_evt_data *evt_data)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_CORE, "Invalid process event request payload");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *) cam_get_device_priv(evt_data->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			evt_data->dev_hdl);
+		return -EINVAL;
+	}
+	return cam_context_handle_crm_process_evt(ctx, evt_data);
+}
+
+/**
+ * cam_node_deinit() - tear down a node by zeroing its state.
+ *
+ * @node: node to deinitialize; NULL is tolerated
+ *
+ * NOTE(review): the memset also wipes node->list_mutex, which
+ * cam_node_init() initialized, without a mutex_destroy() first --
+ * confirm no waiter can still hold it at deinit time.
+ *
+ * Always returns 0.
+ */
+int cam_node_deinit(struct cam_node *node)
+{
+	if (node)
+		memset(node, 0, sizeof(*node));
+
+	CAM_DBG(CAM_CORE, "deinit complete");
+
+	return 0;
+}
+
+/**
+ * cam_node_shutdown() - force-close all live contexts and the HW manager.
+ *
+ * @node: node whose contexts are to be shut down
+ *
+ * Walks the context array and shuts down every context that still owns a
+ * device handle, then invokes the HW manager's close hook if one is set.
+ *
+ * NOTE(review): rc from cam_context_shutdown() is overwritten on each
+ * iteration and the function unconditionally returns 0, so per-context
+ * shutdown failures are not reported to the caller.
+ */
+int cam_node_shutdown(struct cam_node *node)
+{
+	int i = 0;
+	int rc = 0;
+
+	if (!node)
+		return -EINVAL;
+
+	for (i = 0; i < node->ctx_size; i++) {
+		if (node->ctx_list[i].dev_hdl > 0) {
+			CAM_DBG(CAM_CORE,
+				"Node [%s] invoking shutdown on context [%d]",
+				node->name, i);
+			rc = cam_context_shutdown(&(node->ctx_list[i]));
+		}
+	}
+
+	if (node->hw_mgr_intf.hw_close)
+		node->hw_mgr_intf.hw_close(node->hw_mgr_intf.hw_mgr_priv,
+			NULL);
+
+	return 0;
+}
+
+/**
+ * cam_node_init() - initialize a node with its HW manager and contexts.
+ *
+ * @node:        node to initialize (fully zeroed first)
+ * @hw_mgr_intf: HW manager interface, copied by value into the node
+ * @ctx_list:    caller-owned array of pre-initialized contexts
+ * @ctx_size:    number of entries in @ctx_list
+ * @name:        node name, truncated to fit node->name
+ *
+ * Wires the CRM callback table, builds the free-context list from
+ * @ctx_list, and moves the node to CAM_NODE_STATE_INIT.  Every context
+ * must already have a state machine installed.
+ *
+ * NOTE(review): the sizeof(node->hw_mgr_intf) != sizeof(*hw_mgr_intf)
+ * guard compares two identical types and can never fire.  Also, the
+ * failure path sets rc = -1 rather than a proper -errno value.
+ *
+ * Returns 0 on success or a negative value on failure.
+ */
+int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_context *ctx_list, uint32_t ctx_size, char *name)
+{
+	int rc = 0;
+	int i;
+
+	if (!node || !hw_mgr_intf ||
+		sizeof(node->hw_mgr_intf) != sizeof(*hw_mgr_intf)) {
+		return -EINVAL;
+	}
+
+	memset(node, 0, sizeof(*node));
+
+	strlcpy(node->name, name, sizeof(node->name));
+
+	memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf));
+	node->crm_node_intf.apply_req = __cam_node_crm_apply_req;
+	node->crm_node_intf.get_dev_info = __cam_node_crm_get_dev_info;
+	node->crm_node_intf.link_setup = __cam_node_crm_link_setup;
+	node->crm_node_intf.flush_req = __cam_node_crm_flush_req;
+	node->crm_node_intf.process_evt = __cam_node_crm_process_evt;
+
+	mutex_init(&node->list_mutex);
+	INIT_LIST_HEAD(&node->free_ctx_list);
+	node->ctx_list = ctx_list;
+	node->ctx_size = ctx_size;
+	for (i = 0; i < ctx_size; i++) {
+		if (!ctx_list[i].state_machine) {
+			CAM_ERR(CAM_CORE,
+				"camera context %d is not initialized", i);
+			rc = -1;
+			goto err;
+		}
+		INIT_LIST_HEAD(&ctx_list[i].list);
+		list_add_tail(&ctx_list[i].list, &node->free_ctx_list);
+		ctx_list[i].node = node;
+	}
+
+	node->state = CAM_NODE_STATE_INIT;
+/* success also falls through here with rc == 0 */
+err:
+	CAM_DBG(CAM_CORE, "Exit. (rc = %d)", rc);
+	return rc;
+}
+
+/**
+ * cam_node_handle_ioctl() - dispatch a VIDIOC_CAM_CONTROL opcode.
+ *
+ * @node: node the ioctl was issued against
+ * @cmd:  decoded cam_control; cmd->handle is a userspace pointer to the
+ *        per-opcode payload, copied in (and for query/acquire, back out)
+ *
+ * NOTE(review): @node is not NULL-checked here; presumably the caller
+ * (the subdev ioctl path) guarantees it -- confirm before relying on it.
+ * Also, most opcodes convert cmd->handle with u64_to_user_ptr() while
+ * ACQUIRE_HW/RELEASE_HW use a raw (void __user *) cast; verify both are
+ * equivalent on this kernel.
+ *
+ * Returns 0 on success, -EFAULT on copy failures, -EINVAL/-ENOMEM or a
+ * handler rc otherwise.
+ */
+int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
+{
+	int rc = 0;
+
+	if (!cmd)
+		return -EINVAL;
+
+	CAM_DBG(CAM_CORE, "handle cmd %d", cmd->op_code);
+
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_query_cap_cmd query;
+
+		if (copy_from_user(&query, u64_to_user_ptr(cmd->handle),
+			sizeof(query))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = __cam_node_handle_query_cap(node, &query);
+		if (rc) {
+			CAM_ERR(CAM_CORE, "querycap is failed(rc = %d)",
+				rc);
+			break;
+		}
+
+		/* query is an out-parameter: copy results back to user. */
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &query,
+			sizeof(query)))
+			rc = -EFAULT;
+
+		break;
+	}
+	case CAM_ACQUIRE_DEV: {
+		struct cam_acquire_dev_cmd acquire;
+
+		if (copy_from_user(&acquire, u64_to_user_ptr(cmd->handle),
+			sizeof(acquire))) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = __cam_node_handle_acquire_dev(node, &acquire);
+		if (rc) {
+			CAM_ERR(CAM_CORE, "acquire device failed(rc = %d)",
+				rc);
+			break;
+		}
+		/* acquire now carries the new dev_handle back to user. */
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &acquire,
+			sizeof(acquire)))
+			rc = -EFAULT;
+		break;
+	}
+	case CAM_ACQUIRE_HW: {
+		uint32_t api_version;
+		void *acquire_ptr = NULL;
+		size_t acquire_size;
+
+		/* Peek the leading api_version field to size the payload. */
+		if (copy_from_user(&api_version, (void __user *)cmd->handle,
+			sizeof(api_version))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		if (api_version == 1) {
+			acquire_size = sizeof(struct cam_acquire_hw_cmd_v1);
+		} else {
+			CAM_ERR(CAM_CORE, "Unsupported api version %d",
+				api_version);
+			rc = -EINVAL;
+			break;
+		}
+
+		acquire_ptr = kzalloc(acquire_size, GFP_KERNEL);
+		if (!acquire_ptr) {
+			CAM_ERR(CAM_CORE, "No memory for acquire HW");
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(acquire_ptr, (void __user *)cmd->handle,
+			acquire_size)) {
+			rc = -EFAULT;
+			goto acquire_kfree;
+		}
+
+		if (api_version == 1) {
+			rc = __cam_node_handle_acquire_hw_v1(node, acquire_ptr);
+			if (rc) {
+				CAM_ERR(CAM_CORE,
+					"acquire device failed(rc = %d)", rc);
+				goto acquire_kfree;
+			}
+		}
+
+		if (copy_to_user((void __user *)cmd->handle, acquire_ptr,
+			acquire_size))
+			rc = -EFAULT;
+
+acquire_kfree:
+		kfree(acquire_ptr);
+		break;
+	}
+	case CAM_START_DEV: {
+		struct cam_start_stop_dev_cmd start;
+
+		if (copy_from_user(&start, u64_to_user_ptr(cmd->handle),
+			sizeof(start)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_start_dev(node, &start);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"start device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_STOP_DEV: {
+		struct cam_start_stop_dev_cmd stop;
+
+		if (copy_from_user(&stop, u64_to_user_ptr(cmd->handle),
+			sizeof(stop)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_stop_dev(node, &stop);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"stop device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_CONFIG_DEV: {
+		struct cam_config_dev_cmd config;
+
+		if (copy_from_user(&config, u64_to_user_ptr(cmd->handle),
+			sizeof(config)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_config_dev(node, &config);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"config device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_RELEASE_DEV: {
+		struct cam_release_dev_cmd release;
+
+		if (copy_from_user(&release, u64_to_user_ptr(cmd->handle),
+			sizeof(release)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_release_dev(node, &release);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"release device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_RELEASE_HW: {
+		uint32_t api_version;
+		size_t release_size;
+		void *release_ptr = NULL;
+
+		/* Same versioned-payload scheme as CAM_ACQUIRE_HW. */
+		if (copy_from_user(&api_version, (void __user *)cmd->handle,
+			sizeof(api_version))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		if (api_version == 1) {
+			release_size = sizeof(struct cam_release_hw_cmd_v1);
+		} else {
+			CAM_ERR(CAM_CORE, "Unsupported api version %d",
+				api_version);
+			rc = -EINVAL;
+			break;
+		}
+
+		release_ptr = kzalloc(release_size, GFP_KERNEL);
+		if (!release_ptr) {
+			CAM_ERR(CAM_CORE, "No memory for release HW");
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(release_ptr, (void __user *)cmd->handle,
+			release_size)) {
+			rc = -EFAULT;
+			goto release_kfree;
+		}
+
+		if (api_version == 1) {
+			rc = __cam_node_handle_release_hw_v1(node, release_ptr);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"release device failed(rc = %d)", rc);
+		}
+
+release_kfree:
+		kfree(release_ptr);
+		break;
+	}
+	case CAM_FLUSH_REQ: {
+		struct cam_flush_dev_cmd flush;
+
+		if (copy_from_user(&flush, u64_to_user_ptr(cmd->handle),
+			sizeof(flush)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_flush_dev(node, &flush);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"flush device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CORE, "Unknown op code %d", cmd->op_code);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_node.h b/drivers/media/platform/msm/camera_v3/cam_core/cam_node.h
new file mode 100644
index 000000000000..c5216bac0606
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_node.h
@@ -0,0 +1,111 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_NODE_H_
+#define _CAM_NODE_H_
+
+#include <linux/kref.h>
+#include "cam_context.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+
+/* Maximum length of a node name, including the NUL terminator. */
+#define CAM_NODE_NAME_LENGTH_MAX 256
+
+/* Node lifecycle states (stored in cam_node.state). */
+#define CAM_NODE_STATE_UNINIT 0
+#define CAM_NODE_STATE_INIT 1
+
+/**
+ * struct cam_node - Singleton Node for camera HW devices
+ *
+ * @name: Name for struct cam_node
+ * @state: Node state:
+ * 0 = uninitialized, 1 = initialized
+ * @list_mutex: Mutex for the context pool
+ * @free_ctx_list: Free context pool list
+ * @ctx_list: Context list
+ * @ctx_size: Context list size
+ * @hw_mgr_intf: Interface for cam_node to HW
+ * @crm_node_intf: Interface for the CRM to cam_node
+ *
+ */
+struct cam_node {
+	char name[CAM_NODE_NAME_LENGTH_MAX];
+	uint32_t state;
+
+	/* context pool */
+	struct mutex list_mutex;
+	struct list_head free_ctx_list;
+	struct cam_context *ctx_list;
+	uint32_t ctx_size;
+
+	/* interfaces */
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_req_mgr_kmd_ops crm_node_intf;
+};
+
+/**
+ * cam_node_handle_ioctl()
+ *
+ * @brief: Handle ioctl commands
+ *
+ * @node: Node handle
+ * @cmd: IOCTL command
+ *
+ */
+int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd);
+
+/**
+ * cam_node_deinit()
+ *
+ * @brief: Deinitialization function for the Node interface
+ *
+ * @node: Node handle
+ *
+ */
+int cam_node_deinit(struct cam_node *node);
+
+/**
+ * cam_node_shutdown()
+ *
+ * @brief: Shutdowns/Closes the cam node.
+ *
+ * @node: Cam_node pointer
+ *
+ */
+int cam_node_shutdown(struct cam_node *node);
+
+/**
+ * cam_node_init()
+ *
+ * @brief: Initialization function for the Node interface.
+ *
+ * @node: Cam_node pointer
+ * @hw_mgr_intf: HW manager interface blob
+ * @ctx_list: List of cam_contexts to be added
+ * @ctx_size: Number of contexts in @ctx_list
+ * @name: Name for the node
+ *
+ */
+int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_context *ctx_list, uint32_t ctx_size, char *name);
+
+/**
+ * cam_node_put_ctxt_to_free_list()
+ *
+ * @brief: Put context in node free list.
+ *
+ * @ref: Context's kref object
+ *
+ */
+void cam_node_put_ctxt_to_free_list(struct kref *ref);
+
+#endif /* _CAM_NODE_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera_v3/cam_core/cam_subdev.c
new file mode 100644
index 000000000000..6511a188e3b8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_core/cam_subdev.c
@@ -0,0 +1,161 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_debug_util.h"
+
+/**
+ * cam_subdev_subscribe_event()
+ *
+ * @brief: function to subscribe to v4l2 events
+ *
+ * @sd: Pointer to struct v4l2_subdev.
+ * @fh: Pointer to struct v4l2_fh.
+ * @sub: Pointer to struct v4l2_event_subscription.
+ *
+ * Queue depth is CAM_SUBDEVICE_EVENT_MAX; no subdev-specific
+ * subscription ops are installed (last argument NULL).
+ */
+static int cam_subdev_subscribe_event(struct v4l2_subdev *sd,
+	struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_subscribe(fh, sub, CAM_SUBDEVICE_EVENT_MAX, NULL);
+}
+
+/**
+ * cam_subdev_unsubscribe_event()
+ *
+ * @brief: function to unsubscribe from v4l2 events
+ *
+ * @sd: Pointer to struct v4l2_subdev.
+ * @fh: Pointer to struct v4l2_fh.
+ * @sub: Pointer to struct v4l2_event_subscription.
+ *
+ * Thin pass-through to the v4l2 event core; @sd is unused.
+ */
+static int cam_subdev_unsubscribe_event(struct v4l2_subdev *sd,
+	struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+/**
+ * cam_subdev_ioctl() - subdev ioctl entry point.
+ * @sd:  subdev whose drvdata is the owning cam_node
+ * @cmd: ioctl command; only VIDIOC_CAM_CONTROL is supported
+ * @arg: kernel-space cam_control payload
+ *
+ * Rejects calls when the node is missing or still uninitialized, then
+ * dispatches VIDIOC_CAM_CONTROL to cam_node_handle_ioctl().
+ * Returns 0 on success or a negative errno.
+ */
+static long cam_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd,
+	void *arg)
+{
+	long rc;
+	struct cam_node *node =
+		(struct cam_node *) v4l2_get_subdevdata(sd);
+
+	if (!node || node->state == CAM_NODE_STATE_UNINIT) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_node_handle_ioctl(node,
+			(struct cam_control *) arg);
+		break;
+	default:
+		CAM_ERR(CAM_CORE, "Invalid command %d for %s", cmd,
+			node->name);
+		rc = -EINVAL;
+	}
+end:
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+/**
+ * cam_subdev_compat_ioctl() - 32-bit compat ioctl entry point.
+ * @sd:  target subdev
+ * @cmd: ioctl command
+ * @arg: userspace pointer to a struct cam_control
+ *
+ * Bounces the cam_control through a kernel copy, forwards to the native
+ * handler, and copies the (possibly updated) struct back only on
+ * success.  Returns 0 on success or a negative errno.
+ */
+static long cam_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int rc;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_CORE, "Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+	rc = cam_subdev_ioctl(sd, cmd, &cmd_data);
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_CORE,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+#endif
+
+/*
+ * Core ops shared by all camera subdevs.
+ * NOTE(review): not static -- presumably referenced from other
+ * translation units; confirm, else it should be static const.
+ */
+const struct v4l2_subdev_core_ops cam_subdev_core_ops = {
+	.ioctl = cam_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_subdev_compat_ioctl,
+#endif
+	.subscribe_event = cam_subdev_subscribe_event,
+	.unsubscribe_event = cam_subdev_unsubscribe_event,
+};
+
+/* Top-level subdev ops table; only core ops are provided. */
+static const struct v4l2_subdev_ops cam_subdev_ops = {
+	.core = &cam_subdev_core_ops,
+};
+
+/**
+ * cam_subdev_remove() - unregister a subdev and free its node.
+ * @sd: subdev created by cam_subdev_probe(); sd->token is the cam_node
+ *
+ * Unregisters from v4l2, deinits the node and frees the allocation made
+ * in cam_subdev_probe().  Returns 0, or -EINVAL if @sd is NULL.
+ */
+int cam_subdev_remove(struct cam_subdev *sd)
+{
+	if (!sd)
+		return -EINVAL;
+
+	cam_unregister_subdev(sd);
+	cam_node_deinit((struct cam_node *)sd->token);
+	kfree(sd->token);
+
+	return 0;
+}
+
+/**
+ * cam_subdev_probe() - allocate a cam_node and register the subdev.
+ * @sd:       caller-owned subdev descriptor to populate
+ * @pdev:     owning platform device (stored and used for drvdata)
+ * @name:     subdev name (pointer stored, not copied)
+ * @dev_type: media-entity function assigned to sd->ent_function
+ *
+ * The allocated node is stashed in sd->token; pairs with
+ * cam_subdev_remove(), which frees it.
+ *
+ * NOTE(review): on cam_register_subdev() failure the node is freed but
+ * sd->token still points at it -- callers must not touch sd->token
+ * after a failed probe; consider NULLing it on the error path.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
+	char *name, uint32_t dev_type)
+{
+	int rc;
+	struct cam_node *node = NULL;
+
+	if (!sd || !pdev || !name)
+		return -EINVAL;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	/* Setup camera v4l2 subdevice */
+	sd->pdev = pdev;
+	sd->name = name;
+	sd->ops = &cam_subdev_ops;
+	sd->token = node;
+	sd->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	sd->ent_function = dev_type;
+
+	rc = cam_register_subdev(sd);
+	if (rc) {
+		CAM_ERR(CAM_CORE, "cam_register_subdev() failed for dev: %s",
+			sd->name);
+		goto err;
+	}
+	platform_set_drvdata(sd->pdev, sd);
+	return rc;
+err:
+	kfree(node);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/Makefile b/drivers/media/platform/msm/camera_v3/cam_cpas/Makefile
new file mode 100644
index 000000000000..2079e6134adc
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/cpas_top
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/camss_top
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cpas_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += camss_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas_soc.o cam_cpas_intf.o cam_cpas_hw.o \ No newline at end of file
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw.c
new file mode 100644
index 000000000000..2a91f03ecdc8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw.c
@@ -0,0 +1,1710 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "cam_cpas_hw.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_soc.h"
+
+/*
+ * Runtime-tunable floor for the camnoc IB vote; scaled by 1e6 in
+ * cam_cpas_util_vote_bus_client_bw().  0 (default) keeps the
+ * compile-time CAM_CPAS_AXI_MIN_CAMNOC_IB_BW floor.
+ */
+static uint cam_min_camnoc_ib_bw;
+module_param(cam_min_camnoc_ib_bw, uint, 0644);
+
+/**
+ * cam_cpas_util_reg_update() - apply one register setting from a table.
+ * @cpas_hw:  CPAS hw info; supplies core_info and mapped reg bases
+ * @reg_base: which register base to use (resolved via regbase_index)
+ * @reg_info: offset/value descriptor; may be disabled or masked
+ *
+ * When @reg_info->masked_value is set, performs a read-modify-write
+ * keeping bits outside reg_info->mask; otherwise writes value directly.
+ * Returns 0 (also for a disabled entry) or -EINVAL for an unmapped base.
+ */
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	uint32_t value;
+	int reg_base_index;
+
+	if (reg_info->enable == false)
+		return 0;
+
+	reg_base_index = cpas_core->regbase_index[reg_base];
+	if (reg_base_index == -1)
+		return -EINVAL;
+
+	if (reg_info->masked_value) {
+		value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base +
+			reg_info->offset);
+		value = value & (~reg_info->mask);
+		value = value | (reg_info->value << reg_info->shift);
+	} else {
+		value = reg_info->value;
+	}
+
+	CAM_DBG(CAM_CPAS, "Base[%d] Offset[0x%8x] Value[0x%8x]",
+		reg_base, reg_info->offset, value);
+
+	cam_io_w_mb(value, soc_info->reg_map[reg_base_index].mem_base +
+		reg_info->offset);
+
+	return 0;
+}
+
+/**
+ * cam_cpas_util_vote_bus_client_level() - select a static usecase index.
+ * @bus_client: registered bus client (must NOT be a dyn-vote client)
+ * @level:      usecase index into the client's vote table
+ *
+ * No-op if @level is already current.  The return value of
+ * msm_bus_scale_client_update_request() is not checked here.
+ * Returns 0 on success or -EINVAL on an invalid client/level.
+ */
+static int cam_cpas_util_vote_bus_client_level(
+	struct cam_cpas_bus_client *bus_client, unsigned int level)
+{
+	if (!bus_client->valid || (bus_client->dyn_vote == true)) {
+		CAM_ERR(CAM_CPAS, "Invalid params %d %d", bus_client->valid,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	if (level >= bus_client->num_usecases) {
+		CAM_ERR(CAM_CPAS, "Invalid vote level=%d, usecases=%d", level,
+			bus_client->num_usecases);
+		return -EINVAL;
+	}
+
+	if (level == bus_client->curr_vote_level)
+		return 0;
+
+	CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] index[%d]",
+		bus_client->client_id, bus_client->name, level);
+	msm_bus_scale_client_update_request(bus_client->client_id, level);
+	bus_client->curr_vote_level = level;
+
+	return 0;
+}
+
+/**
+ * cam_cpas_util_vote_bus_client_bw() - update ab/ib bandwidth vote.
+ * @bus_client: registered dyn-vote bus client (exactly 2 usecases,
+ *              1 path)
+ * @ab:         arbitrated bandwidth to vote; floored when non-zero
+ * @ib:         instantaneous bandwidth to vote; floored when non-zero
+ * @camnoc_bw:  true selects the camnoc floors (incl. the module-param
+ *              override), false the mnoc floors
+ *
+ * Dyn-vote clients ping-pong between usecase slots 0 and 1 so the bus
+ * driver sees each update as a new request.
+ *
+ * NOTE(review): the slot index is chosen under bus_client->lock but the
+ * vector write and update_request happen after unlock -- concurrent
+ * callers could race on pdata->usecase[idx]; confirm callers serialize.
+ *
+ * Returns 0 on success or -EINVAL on an invalid client setup.
+ */
+static int cam_cpas_util_vote_bus_client_bw(
+	struct cam_cpas_bus_client *bus_client, uint64_t ab, uint64_t ib,
+	bool camnoc_bw)
+{
+	struct msm_bus_paths *path;
+	struct msm_bus_scale_pdata *pdata;
+	int idx = 0;
+	uint64_t min_camnoc_ib_bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+
+	if (cam_min_camnoc_ib_bw > 0)
+		min_camnoc_ib_bw = (uint64_t)cam_min_camnoc_ib_bw * 1000000L;
+
+	CAM_DBG(CAM_CPAS, "cam_min_camnoc_ib_bw = %d, min_camnoc_ib_bw=%llu",
+		cam_min_camnoc_ib_bw, min_camnoc_ib_bw);
+
+	if (!bus_client->valid) {
+		CAM_ERR(CAM_CPAS, "bus client not valid");
+		return -EINVAL;
+	}
+
+	if ((bus_client->num_usecases != 2) ||
+		(bus_client->num_paths != 1) ||
+		(bus_client->dyn_vote != true)) {
+		CAM_ERR(CAM_CPAS, "dynamic update not allowed %d %d %d",
+			bus_client->num_usecases, bus_client->num_paths,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	mutex_lock(&bus_client->lock);
+
+	if (bus_client->curr_vote_level > 1) {
+		CAM_ERR(CAM_CPAS, "curr_vote_level %d cannot be greater than 1",
+			bus_client->curr_vote_level);
+		mutex_unlock(&bus_client->lock);
+		return -EINVAL;
+	}
+
+	/* Flip to the other usecase slot (0 <-> 1). */
+	idx = bus_client->curr_vote_level;
+	idx = 1 - idx;
+	bus_client->curr_vote_level = idx;
+	mutex_unlock(&bus_client->lock);
+
+	/* Clamp non-zero votes up to the per-noc minimums. */
+	if (camnoc_bw == true) {
+		if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_CAMNOC_AB_BW))
+			ab = CAM_CPAS_AXI_MIN_CAMNOC_AB_BW;
+
+		if ((ib > 0) && (ib < min_camnoc_ib_bw))
+			ib = min_camnoc_ib_bw;
+	} else {
+		if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_MNOC_AB_BW))
+			ab = CAM_CPAS_AXI_MIN_MNOC_AB_BW;
+
+		if ((ib > 0) && (ib < CAM_CPAS_AXI_MIN_MNOC_IB_BW))
+			ib = CAM_CPAS_AXI_MIN_MNOC_IB_BW;
+	}
+
+	pdata = bus_client->pdata;
+	path = &(pdata->usecase[idx]);
+	path->vectors[0].ab = ab;
+	path->vectors[0].ib = ib;
+
+	CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] :ab[%llu] ib[%llu], index[%d]",
+		bus_client->client_id, bus_client->name, ab, ib, idx);
+	msm_bus_scale_client_update_request(bus_client->client_id, idx);
+
+	return 0;
+}
+
+/**
+ * cam_cpas_util_register_bus_client() - create a bus client from DT.
+ * @soc_info:   SoC info providing the platform device for DT parsing
+ * @dev_node:   DT node containing the msm-bus scale vectors
+ * @bus_client: output; populated and marked valid on success
+ *
+ * Parses the bus-scale pdata, registers the client with an initial
+ * vote of usecase 0, and records the dyn-vote property (which requires
+ * exactly two usecases).  Returns 0 on success or -EINVAL.
+ */
+static int cam_cpas_util_register_bus_client(
+	struct cam_hw_soc_info *soc_info, struct device_node *dev_node,
+	struct cam_cpas_bus_client *bus_client)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	uint32_t client_id;
+	int rc;
+
+	pdata = msm_bus_pdata_from_node(soc_info->pdev,
+		dev_node);
+	if (!pdata) {
+		CAM_ERR(CAM_CPAS, "failed get_pdata");
+		return -EINVAL;
+	}
+
+	if ((pdata->num_usecases == 0) ||
+		(pdata->usecase[0].num_paths == 0)) {
+		CAM_ERR(CAM_CPAS, "usecase=%d", pdata->num_usecases);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	client_id = msm_bus_scale_register_client(pdata);
+	if (!client_id) {
+		CAM_ERR(CAM_CPAS, "failed in register ahb bus client");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	bus_client->dyn_vote = of_property_read_bool(dev_node,
+		"qcom,msm-bus-vector-dyn-vote");
+
+	if (bus_client->dyn_vote && (pdata->num_usecases != 2)) {
+		CAM_ERR(CAM_CPAS, "Excess or less vectors %d",
+			pdata->num_usecases);
+		rc = -EINVAL;
+		goto fail_unregister_client;
+	}
+
+	/* Start with the lowest (index 0) vote. */
+	msm_bus_scale_client_update_request(client_id, 0);
+
+	bus_client->src = pdata->usecase[0].vectors[0].src;
+	bus_client->dst = pdata->usecase[0].vectors[0].dst;
+	bus_client->pdata = pdata;
+	bus_client->client_id = client_id;
+	bus_client->num_usecases = pdata->num_usecases;
+	bus_client->num_paths = pdata->usecase[0].num_paths;
+	bus_client->curr_vote_level = 0;
+	bus_client->valid = true;
+	bus_client->name = pdata->name;
+	mutex_init(&bus_client->lock);
+
+	CAM_DBG(CAM_CPAS, "Bus Client=[%d][%s] : src=%d, dst=%d",
+		bus_client->client_id, bus_client->name,
+		bus_client->src, bus_client->dst);
+
+	return 0;
+fail_unregister_client:
+	msm_bus_scale_unregister_client(bus_client->client_id);
+error:
+	return rc;
+
+}
+
+/**
+ * cam_cpas_util_unregister_bus_client() - drop votes and unregister.
+ * @bus_client: client previously set up by
+ *              cam_cpas_util_register_bus_client()
+ *
+ * Votes zero bandwidth (dyn-vote) or level 0 (static), unregisters the
+ * bus client, marks it invalid and destroys its mutex.
+ * Returns 0, or -EINVAL if the client was never valid.
+ */
+static int cam_cpas_util_unregister_bus_client(
+	struct cam_cpas_bus_client *bus_client)
+{
+	if (!bus_client->valid)
+		return -EINVAL;
+
+	if (bus_client->dyn_vote)
+		cam_cpas_util_vote_bus_client_bw(bus_client, 0, 0, false);
+	else
+		cam_cpas_util_vote_bus_client_level(bus_client, 0);
+
+	msm_bus_scale_unregister_client(bus_client->client_id);
+	bus_client->valid = false;
+
+	mutex_destroy(&bus_client->lock);
+
+	return 0;
+}
+
+/**
+ * cam_cpas_util_axi_cleanup() - tear down all AXI ports.
+ * @cpas_core: core holding axi_ports_list_head
+ * @soc_info:  SoC info; soc_private tells whether camnoc buses exist
+ *
+ * Reverses cam_cpas_util_axi_setup(): unregisters each port's bus
+ * clients, releases all DT node references taken during setup, and
+ * frees each port.  Always returns 0.
+ */
+static int cam_cpas_util_axi_cleanup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *)soc_info->soc_private;
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		cam_cpas_util_unregister_bus_client(&curr_port->mnoc_bus);
+		of_node_put(curr_port->axi_port_mnoc_node);
+		if (soc_private->axi_camnoc_based) {
+			cam_cpas_util_unregister_bus_client(
+				&curr_port->camnoc_bus);
+			of_node_put(curr_port->axi_port_camnoc_node);
+		}
+		of_node_put(curr_port->axi_port_node);
+		list_del(&curr_port->sibling_port);
+		mutex_destroy(&curr_port->lock);
+		kfree(curr_port);
+	}
+
+	of_node_put(soc_private->axi_port_list_node);
+
+	return 0;
+}
+
+/**
+ * cam_cpas_util_axi_setup() - build AXI port list from device tree.
+ * @cpas_core: core whose axi_ports_list_head is populated
+ * @soc_info:  SoC info; soc_private->axi_camnoc_based selects whether a
+ *             camnoc bus client is also registered per port
+ *
+ * For each child of "qcom,axi-port-list": allocate a port, read its
+ * name, register the mandatory mnoc bus client and (optionally) the
+ * camnoc client, then link the port into the core's list.
+ *
+ * On any failure the partially-built port is unwound via the labeled
+ * error ladder and all previously built ports are torn down with
+ * cam_cpas_util_axi_cleanup().  Returns 0 on success or a negative
+ * errno.
+ */
+static int cam_cpas_util_axi_setup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *)soc_info->soc_private;
+	struct cam_cpas_axi_port *axi_port;
+	int rc;
+	struct device_node *axi_port_list_node;
+	struct device_node *axi_port_node = NULL;
+	struct device_node *axi_port_mnoc_node = NULL;
+	struct device_node *axi_port_camnoc_node = NULL;
+
+	INIT_LIST_HEAD(&cpas_core->axi_ports_list_head);
+
+	axi_port_list_node = of_find_node_by_name(soc_info->pdev->dev.of_node,
+		"qcom,axi-port-list");
+	if (!axi_port_list_node) {
+		CAM_ERR(CAM_CPAS, "Node qcom,axi-port-list not found.");
+		return -EINVAL;
+	}
+
+	soc_private->axi_port_list_node = axi_port_list_node;
+
+	for_each_available_child_of_node(axi_port_list_node, axi_port_node) {
+		axi_port = kzalloc(sizeof(*axi_port), GFP_KERNEL);
+		if (!axi_port) {
+			rc = -ENOMEM;
+			goto error_previous_axi_cleanup;
+		}
+		axi_port->axi_port_node = axi_port_node;
+
+		rc = of_property_read_string_index(axi_port_node,
+			"qcom,axi-port-name", 0,
+			(const char **)&axi_port->axi_port_name);
+		if (rc) {
+			CAM_ERR(CAM_CPAS,
+				"failed to read qcom,axi-port-name rc=%d", rc);
+			goto port_name_fail;
+		}
+
+		axi_port_mnoc_node = of_find_node_by_name(axi_port_node,
+			"qcom,axi-port-mnoc");
+		if (!axi_port_mnoc_node) {
+			CAM_ERR(CAM_CPAS, "Node qcom,axi-port-mnoc not found.");
+			rc = -EINVAL;
+			goto mnoc_node_get_fail;
+		}
+		axi_port->axi_port_mnoc_node = axi_port_mnoc_node;
+
+		rc = cam_cpas_util_register_bus_client(soc_info,
+			axi_port_mnoc_node, &axi_port->mnoc_bus);
+		if (rc)
+			goto mnoc_register_fail;
+
+		if (soc_private->axi_camnoc_based) {
+			axi_port_camnoc_node = of_find_node_by_name(
+				axi_port_node, "qcom,axi-port-camnoc");
+			if (!axi_port_camnoc_node) {
+				CAM_ERR(CAM_CPAS,
+					"Node qcom,axi-port-camnoc not found");
+				rc = -EINVAL;
+				goto camnoc_node_get_fail;
+			}
+			axi_port->axi_port_camnoc_node = axi_port_camnoc_node;
+
+			rc = cam_cpas_util_register_bus_client(soc_info,
+				axi_port_camnoc_node, &axi_port->camnoc_bus);
+			if (rc)
+				goto camnoc_register_fail;
+		}
+
+		mutex_init(&axi_port->lock);
+
+		INIT_LIST_HEAD(&axi_port->sibling_port);
+		list_add_tail(&axi_port->sibling_port,
+			&cpas_core->axi_ports_list_head);
+		INIT_LIST_HEAD(&axi_port->clients_list_head);
+	}
+
+	return 0;
+camnoc_register_fail:
+	of_node_put(axi_port->axi_port_camnoc_node);
+camnoc_node_get_fail:
+	cam_cpas_util_unregister_bus_client(&axi_port->mnoc_bus);
+mnoc_register_fail:
+	of_node_put(axi_port->axi_port_mnoc_node);
+mnoc_node_get_fail:
+port_name_fail:
+	of_node_put(axi_port->axi_port_node);
+	kfree(axi_port);
+error_previous_axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, soc_info);
+	return rc;
+}
+
+/**
+ * cam_cpas_util_vote_default_ahb_axi() - default votes for enable/disable.
+ * @cpas_hw: CPAS hw info (core + soc private data)
+ * @enable:  non-zero to vote SVS AHB + default AXI BW on every port,
+ *           zero to vote suspend AHB + zero AXI BW
+ *
+ * Fix vs. original: the return value of the camnoc
+ * cam_cpas_util_vote_bus_client_bw() call was discarded, so the
+ * following "if (rc)" tested the stale (successful) mnoc result and
+ * camnoc vote failures were silently ignored; the error log also said
+ * "mnoc" on the camnoc path.  rc is now assigned and the message
+ * corrected.
+ *
+ * Returns 0 on success; on failure the AHB vote is rolled back to
+ * suspend and the error is returned.
+ */
+static int cam_cpas_util_vote_default_ahb_axi(struct cam_hw_info *cpas_hw,
+	int enable)
+{
+	int rc;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+	uint64_t camnoc_bw, mnoc_bw;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	rc = cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		(enable == true) ? CAM_SVS_VOTE : CAM_SUSPEND_VOTE);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "Failed in AHB vote, enable=%d, rc=%d",
+			enable, rc);
+		return rc;
+	}
+
+	if (enable) {
+		mnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	} else {
+		mnoc_bw = 0;
+		camnoc_bw = 0;
+	}
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
+			mnoc_bw, mnoc_bw, false);
+		if (rc) {
+			CAM_ERR(CAM_CPAS,
+				"Failed in mnoc vote, enable=%d, rc=%d",
+				enable, rc);
+			goto remove_ahb_vote;
+		}
+
+		if (soc_private->axi_camnoc_based) {
+			rc = cam_cpas_util_vote_bus_client_bw(
+				&curr_port->camnoc_bus, 0, camnoc_bw, true);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"Failed in camnoc vote, enable=%d, %d",
+					enable, rc);
+				/* Undo this port's mnoc vote before bailing. */
+				cam_cpas_util_vote_bus_client_bw(
+					&curr_port->mnoc_bus, 0, 0, false);
+				goto remove_ahb_vote;
+			}
+		}
+	}
+
+	return 0;
+remove_ahb_vote:
+	cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		CAM_SUSPEND_VOTE);
+	return rc;
+}
+
+/*
+ * Bind a registering client to the AXI port whose name matches the
+ * client's configured axi port name.
+ *
+ * @cpas_core:   CPAS core info holding the axi ports list
+ * @soc_private: private soc data with per-client axi port names
+ * @cpas_client: the client being registered
+ * @client_indx: index of the client in the soc private tables
+ *
+ * Matching uses strnstr(): the client's port name must appear as a
+ * substring of the port's name. On a match the client is appended to
+ * that port's client list under the port lock and the search stops.
+ *
+ * NOTE(review): when no port name matches, cpas_client->axi_port stays
+ * NULL yet 0 is still returned - downstream code must tolerate a NULL
+ * axi_port (the axi-vote path checks it; verify the unregister path
+ * does too).
+ */
+static int cam_cpas_util_insert_client_to_axi_port(struct cam_cpas *cpas_core,
+ struct cam_cpas_private_soc *soc_private,
+ struct cam_cpas_client *cpas_client, int32_t client_indx)
+{
+ struct cam_cpas_axi_port *curr_port;
+ struct cam_cpas_axi_port *temp_port;
+
+ list_for_each_entry_safe(curr_port, temp_port,
+ &cpas_core->axi_ports_list_head, sibling_port) {
+ if (strnstr(curr_port->axi_port_name,
+ soc_private->client_axi_port_name[client_indx],
+ strlen(curr_port->axi_port_name))) {
+
+ cpas_client->axi_port = curr_port;
+ INIT_LIST_HEAD(&cpas_client->axi_sibling_client);
+
+ mutex_lock(&curr_port->lock);
+ list_add_tail(&cpas_client->axi_sibling_client,
+ &cpas_client->axi_port->clients_list_head);
+ mutex_unlock(&curr_port->lock);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Detach a client from its AXI port's client list (under the port lock).
+ *
+ * Fix: guard against a NULL axi_port. A client whose port name matched no
+ * AXI port at insert time has axi_port == NULL (insert still returns 0),
+ * and unregistering such a client would otherwise dereference NULL here
+ * and list_del() an uninitialized list head.
+ */
+static void cam_cpas_util_remove_client_from_axi_port(
+ struct cam_cpas_client *cpas_client)
+{
+ if (!cpas_client->axi_port)
+ return;
+
+ mutex_lock(&cpas_client->axi_port->lock);
+ list_del(&cpas_client->axi_sibling_client);
+ mutex_unlock(&cpas_client->axi_port->lock);
+}
+
+/*
+ * Write @value at @offset within the register space selected by
+ * @reg_base, on behalf of a started CPAS client. @mb selects a write
+ * with memory barrier.
+ *
+ * Returns 0 on success, -EINVAL for a bad register base or client
+ * handle, -EPERM when the client has not been started.
+ */
+static int cam_cpas_hw_reg_write(struct cam_hw_info *cpas_hw,
+ uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+ uint32_t offset, bool mb, uint32_t value)
+{
+ struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+ struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_client *client = NULL;
+ int base_indx = cpas_core->regbase_index[reg_base];
+ uint32_t indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+ void __iomem *reg_addr;
+ int rc = 0;
+
+ if ((base_indx < 0) || (base_indx >= soc_info->num_reg_map)) {
+ CAM_ERR(CAM_CPAS,
+ "Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
+ reg_base, base_indx, soc_info->num_reg_map);
+ return -EINVAL;
+ }
+
+ if (!CAM_CPAS_CLIENT_VALID(indx))
+ return -EINVAL;
+
+ mutex_lock(&cpas_core->client_mutex[indx]);
+ client = cpas_core->cpas_client[indx];
+
+ if (!CAM_CPAS_CLIENT_STARTED(cpas_core, indx)) {
+ CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+ indx, client->data.identifier,
+ client->data.cell_index);
+ rc = -EPERM;
+ goto unlock_client;
+ }
+
+ reg_addr = soc_info->reg_map[base_indx].mem_base + offset;
+ if (mb)
+ cam_io_w_mb(value, reg_addr);
+ else
+ cam_io_w(value, reg_addr);
+
+unlock_client:
+ mutex_unlock(&cpas_core->client_mutex[indx]);
+ return rc;
+}
+
+/*
+ * Read the register at @offset within the register space selected by
+ * @reg_base into *@value, on behalf of a started CPAS client. @mb
+ * selects a read with memory barrier.
+ *
+ * Returns 0 on success, -EINVAL for a NULL @value, bad register base or
+ * client handle, -EPERM when the client has not been started.
+ */
+static int cam_cpas_hw_reg_read(struct cam_hw_info *cpas_hw,
+ uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+ uint32_t offset, bool mb, uint32_t *value)
+{
+ struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+ struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_client *client = NULL;
+ int base_indx = cpas_core->regbase_index[reg_base];
+ uint32_t indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+ void __iomem *reg_addr;
+ int rc = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ if ((base_indx < 0) || (base_indx >= soc_info->num_reg_map)) {
+ CAM_ERR(CAM_CPAS,
+ "Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
+ reg_base, base_indx, soc_info->num_reg_map);
+ return -EINVAL;
+ }
+
+ if (!CAM_CPAS_CLIENT_VALID(indx))
+ return -EINVAL;
+
+ mutex_lock(&cpas_core->client_mutex[indx]);
+ client = cpas_core->cpas_client[indx];
+
+ if (!CAM_CPAS_CLIENT_STARTED(cpas_core, indx)) {
+ CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+ indx, client->data.identifier,
+ client->data.cell_index);
+ rc = -EPERM;
+ goto unlock_client;
+ }
+
+ reg_addr = soc_info->reg_map[base_indx].mem_base + offset;
+ *value = mb ? cam_io_r_mb(reg_addr) : cam_io_r(reg_addr);
+
+unlock_client:
+ mutex_unlock(&cpas_core->client_mutex[indx]);
+ return rc;
+}
+
+/*
+ * Derive the camnoc axi clock rate from the highest consolidated
+ * uncompressed-bandwidth vote across all AXI ports and apply it via the
+ * src clock. Acts only when the soc indicates cpas should control the
+ * camnoc axi clock; otherwise returns 0 without touching anything.
+ */
+static int cam_cpas_util_set_camnoc_axi_clk_rate(
+ struct cam_hw_info *cpas_hw)
+{
+ struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_private_soc *soc_private =
+ (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+ struct cam_axi_vote consolidated_axi_vote;
+ int rc = 0;
+
+ CAM_DBG(CAM_CPAS, "control_camnoc_axi_clk=%d",
+ soc_private->control_camnoc_axi_clk);
+
+ if (soc_private->control_camnoc_axi_clk) {
+ struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+ struct cam_cpas_axi_port *curr_axi_port = NULL;
+ struct cam_cpas_axi_port *temp_axi_port = NULL;
+ uint64_t required_camnoc_bw = 0;
+ int32_t clk_rate = 0;
+
+ /* Peak (not sum) of per-port uncompressed bw drives the clk.
+ * NOTE(review): the ports list is walked with no lock held -
+ * presumably serialized by callers; confirm.
+ */
+ list_for_each_entry_safe(curr_axi_port, temp_axi_port,
+ &cpas_core->axi_ports_list_head, sibling_port) {
+ consolidated_axi_vote =
+ curr_axi_port->consolidated_axi_vote;
+
+ if (consolidated_axi_vote.uncompressed_bw
+ > required_camnoc_bw)
+ required_camnoc_bw =
+ consolidated_axi_vote.uncompressed_bw;
+
+ CAM_DBG(CAM_CPAS, "[%s] : curr=%llu, overal=%llu",
+ curr_axi_port->axi_port_name,
+ consolidated_axi_vote.uncompressed_bw,
+ required_camnoc_bw);
+ }
+
+ /* Add the configured percentage margin on top of the peak */
+ required_camnoc_bw += (required_camnoc_bw *
+ soc_private->camnoc_axi_clk_bw_margin) / 100;
+
+ /* Enforce the minimum IB floor for any non-zero request */
+ if ((required_camnoc_bw > 0) &&
+ (required_camnoc_bw <
+ soc_private->camnoc_axi_min_ib_bw))
+ required_camnoc_bw = soc_private->camnoc_axi_min_ib_bw;
+
+ /* NOTE(review): clk_rate is int32_t from a 64-bit division -
+ * assumes bw/bus_width fits in 32 bits; confirm for max bw.
+ */
+ clk_rate = required_camnoc_bw / soc_private->camnoc_bus_width;
+
+ CAM_DBG(CAM_CPAS, "Setting camnoc axi clk rate : %llu %d",
+ required_camnoc_bw, clk_rate);
+
+ rc = cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
+ if (rc)
+ CAM_ERR(CAM_CPAS,
+ "Failed in setting camnoc axi clk %llu %d %d",
+ required_camnoc_bw, clk_rate, rc);
+ }
+
+ return rc;
+}
+
+/*
+ * Apply one client's AXI bandwidth vote and re-consolidate its AXI port.
+ *
+ * @cpas_hw:     CPAS hw info
+ * @cpas_client: the client whose vote changed
+ * @axi_vote:    requested compressed/uncompressed bandwidth
+ *
+ * Records the client's vote, sums all client votes on the port under the
+ * port lock, votes the mnoc bus client (and the camnoc bus client when
+ * the soc is camnoc based), then recomputes the camnoc axi clock rate
+ * outside the port lock.
+ *
+ * Returns 0 on success or a negative errno from the bus/clk voting.
+ */
+static int cam_cpas_util_apply_client_axi_vote(
+ struct cam_hw_info *cpas_hw,
+ struct cam_cpas_client *cpas_client,
+ struct cam_axi_vote *axi_vote)
+{
+ struct cam_cpas_private_soc *soc_private =
+ (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+ struct cam_cpas_client *curr_client;
+ struct cam_cpas_client *temp_client;
+ struct cam_axi_vote req_axi_vote = *axi_vote;
+ struct cam_cpas_axi_port *axi_port = cpas_client->axi_port;
+ uint64_t camnoc_bw = 0, mnoc_bw = 0;
+ int rc = 0;
+
+ /* Clients may legitimately have no matching port (insert is lenient) */
+ if (!axi_port) {
+ CAM_ERR(CAM_CPAS, "axi port does not exists");
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure we use same bw for both compressed, uncompressed
+ * in case client has requested either of one only
+ */
+ if (req_axi_vote.compressed_bw == 0)
+ req_axi_vote.compressed_bw = req_axi_vote.uncompressed_bw;
+
+ if (req_axi_vote.uncompressed_bw == 0)
+ req_axi_vote.uncompressed_bw = req_axi_vote.compressed_bw;
+
+ /* No-op when the client's effective vote is unchanged */
+ if ((cpas_client->axi_vote.compressed_bw ==
+ req_axi_vote.compressed_bw) &&
+ (cpas_client->axi_vote.uncompressed_bw ==
+ req_axi_vote.uncompressed_bw))
+ return 0;
+
+ mutex_lock(&axi_port->lock);
+ cpas_client->axi_vote = req_axi_vote;
+
+ /* Consolidate: camnoc tracks uncompressed bw, mnoc compressed bw */
+ list_for_each_entry_safe(curr_client, temp_client,
+ &axi_port->clients_list_head, axi_sibling_client) {
+ camnoc_bw += curr_client->axi_vote.uncompressed_bw;
+ mnoc_bw += curr_client->axi_vote.compressed_bw;
+ }
+
+ /* Without a separate camnoc path, mnoc must carry at least camnoc bw */
+ if ((!soc_private->axi_camnoc_based) && (mnoc_bw < camnoc_bw))
+ mnoc_bw = camnoc_bw;
+
+ axi_port->consolidated_axi_vote.compressed_bw = mnoc_bw;
+ axi_port->consolidated_axi_vote.uncompressed_bw = camnoc_bw;
+
+ CAM_DBG(CAM_CPAS,
+ "axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]",
+ axi_port->mnoc_bus.src, axi_port->mnoc_bus.dst,
+ axi_port->camnoc_bus.src, axi_port->camnoc_bus.dst,
+ camnoc_bw, mnoc_bw);
+
+ rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
+ mnoc_bw, mnoc_bw, false);
+ if (rc) {
+ CAM_ERR(CAM_CPAS,
+ "Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
+ mnoc_bw, mnoc_bw, rc);
+ goto unlock_axi_port;
+ }
+
+ if (soc_private->axi_camnoc_based) {
+ rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
+ 0, camnoc_bw, true);
+ if (rc) {
+ CAM_ERR(CAM_CPAS,
+ "Failed camnoc vote ab[%llu] ib[%llu] rc=%d",
+ (uint64_t)0, camnoc_bw, rc);
+ goto unlock_axi_port;
+ }
+ }
+
+ /* Drop the port lock before touching the (global) camnoc clk rate */
+ mutex_unlock(&axi_port->lock);
+
+ rc = cam_cpas_util_set_camnoc_axi_clk_rate(cpas_hw);
+ if (rc)
+ CAM_ERR(CAM_CPAS, "Failed in setting axi clk rate rc=%d", rc);
+
+ return rc;
+
+unlock_axi_port:
+ mutex_unlock(&axi_port->lock);
+ return rc;
+}
+
+/*
+ * HW-layer entry point for a client AXI vote update.
+ *
+ * Validates the handle, requires the client to be started, and forwards
+ * a local copy of the vote to cam_cpas_util_apply_client_axi_vote()
+ * under hw_mutex + the client's mutex.
+ *
+ * Note: an all-zero vote is coerced to CAM_CPAS_DEFAULT_AXI_BW rather
+ * than removing the vote - started clients always keep a minimum vote.
+ */
+static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
+ uint32_t client_handle, struct cam_axi_vote *client_axi_vote)
+{
+ struct cam_axi_vote axi_vote;
+ struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_client *cpas_client = NULL;
+ uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+ int rc = 0;
+
+ if (!client_axi_vote) {
+ CAM_ERR(CAM_CPAS, "Invalid arg client_handle=%d",
+ client_handle);
+ return -EINVAL;
+ }
+
+ /* Work on a copy; the caller's vote struct is never modified */
+ axi_vote = *client_axi_vote;
+
+ if ((axi_vote.compressed_bw == 0) &&
+ (axi_vote.uncompressed_bw == 0)) {
+ CAM_DBG(CAM_CPAS, "0 vote from client_handle=%d",
+ client_handle);
+ axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ }
+
+ if (!CAM_CPAS_CLIENT_VALID(client_indx))
+ return -EINVAL;
+
+ mutex_lock(&cpas_hw->hw_mutex);
+ mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
+
+ if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+ CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
+ rc = -EPERM;
+ goto unlock_client;
+ }
+
+ CAM_DBG(CAM_PERF,
+ "Client=[%d][%s][%d] Requested compressed[%llu], uncompressed[%llu]",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, axi_vote.compressed_bw,
+ axi_vote.uncompressed_bw);
+
+ rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
+ cpas_core->cpas_client[client_indx], &axi_vote);
+
+unlock_client:
+ mutex_unlock(&cpas_core->client_mutex[client_indx]);
+ mutex_unlock(&cpas_hw->hw_mutex);
+ return rc;
+}
+
+/*
+ * Translate a clock frequency into an AHB vote level using @dev's OPP
+ * table and the soc's vdd-corner to ahb-level mapping.
+ *
+ * @cpas_hw:   CPAS hw info (for the soc private mapping table)
+ * @dev:       device whose OPP table is consulted
+ * @freq:      requested frequency
+ * @req_level: out - resolved vote level
+ *
+ * If no OPP ceil entry exists for the frequency, TURBO is requested as a
+ * safe upper bound. A corner with no mapping defaults to SVS.
+ *
+ * NOTE(review): dev_pm_opp_get_voltage() is used to read the corner id -
+ * presumably the OPP voltage field is populated with corner values in
+ * DT; confirm. The mapping loop has no break, so the LAST matching
+ * entry wins if duplicates exist.
+ */
+static int cam_cpas_util_get_ahb_level(struct cam_hw_info *cpas_hw,
+ struct device *dev, unsigned long freq, enum cam_vote_level *req_level)
+{
+ struct cam_cpas_private_soc *soc_private =
+ (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+ struct dev_pm_opp *opp;
+ unsigned int corner;
+ enum cam_vote_level level = CAM_SVS_VOTE;
+ unsigned long corner_freq = freq;
+ int i;
+
+ if (!dev || !req_level) {
+ CAM_ERR(CAM_CPAS, "Invalid params %pK, %pK", dev, req_level);
+ return -EINVAL;
+ }
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
+ if (IS_ERR(opp)) {
+ CAM_DBG(CAM_CPAS, "OPP Ceil not available for freq :%ld, %pK",
+ corner_freq, opp);
+ *req_level = CAM_TURBO_VOTE;
+ return 0;
+ }
+
+ corner = dev_pm_opp_get_voltage(opp);
+
+ for (i = 0; i < soc_private->num_vdd_ahb_mapping; i++)
+ if (corner == soc_private->vdd_ahb[i].vdd_corner)
+ level = soc_private->vdd_ahb[i].ahb_level;
+
+ CAM_DBG(CAM_CPAS,
+ "From OPP table : freq=[%ld][%ld], corner=%d, level=%d",
+ freq, corner_freq, corner, level);
+
+ *req_level = level;
+
+ return 0;
+}
+
+/*
+ * Apply one client's AHB vote and re-resolve the system-wide AHB level.
+ *
+ * @cpas_hw:       CPAS hw info
+ * @cpas_client:   the client whose vote changed
+ * @ahb_vote:      absolute level or dynamic (frequency-derived) vote
+ * @applied_level: optional out - the level actually applied
+ *
+ * The effective level is the HIGHEST requested level across all
+ * registered clients; the bus vote and the clock rate level are updated
+ * together under the ahb bus client lock.
+ *
+ * Returns 0 on success or a negative errno from level resolution,
+ * bus voting, or clock scaling.
+ */
+static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw,
+ struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote,
+ enum cam_vote_level *applied_level)
+{
+ struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
+ enum cam_vote_level required_level;
+ enum cam_vote_level highest_level;
+ int i, rc = 0;
+
+ if (!ahb_bus_client->valid) {
+ CAM_ERR(CAM_CPAS, "AHB Bus client not valid");
+ return -EINVAL;
+ }
+
+ /* Dynamic votes carry a frequency; map it via the OPP table */
+ if (ahb_vote->type == CAM_VOTE_DYNAMIC) {
+ rc = cam_cpas_util_get_ahb_level(cpas_hw, cpas_client->data.dev,
+ ahb_vote->vote.freq, &required_level);
+ if (rc)
+ return rc;
+ } else {
+ required_level = ahb_vote->vote.level;
+ }
+
+ /* Fast path: this client's level is unchanged */
+ if (cpas_client->ahb_level == required_level)
+ return 0;
+
+ mutex_lock(&ahb_bus_client->lock);
+ cpas_client->ahb_level = required_level;
+
+ CAM_DBG(CAM_CPAS, "Client=[%d][%s] required level[%d], curr_level[%d]",
+ ahb_bus_client->client_id, ahb_bus_client->name,
+ required_level, ahb_bus_client->curr_vote_level);
+
+ if (required_level == ahb_bus_client->curr_vote_level)
+ goto unlock_bus_client;
+
+ /* The applied level is the max over every registered client */
+ highest_level = required_level;
+ for (i = 0; i < cpas_core->num_clients; i++) {
+ if (cpas_core->cpas_client[i] && (highest_level <
+ cpas_core->cpas_client[i]->ahb_level))
+ highest_level = cpas_core->cpas_client[i]->ahb_level;
+ }
+
+ CAM_DBG(CAM_CPAS, "Required highest_level[%d]", highest_level);
+
+ rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
+ highest_level);
+ if (rc) {
+ CAM_ERR(CAM_CPAS, "Failed in ahb vote, level=%d, rc=%d",
+ highest_level, rc);
+ goto unlock_bus_client;
+ }
+
+ /* Keep the clock rate level in lock-step with the bus vote */
+ rc = cam_soc_util_set_clk_rate_level(&cpas_hw->soc_info, highest_level);
+ if (rc) {
+ CAM_ERR(CAM_CPAS,
+ "Failed in scaling clock rate level %d for AHB",
+ highest_level);
+ goto unlock_bus_client;
+ }
+
+ if (applied_level)
+ *applied_level = highest_level;
+
+unlock_bus_client:
+ mutex_unlock(&ahb_bus_client->lock);
+ return rc;
+}
+
+/*
+ * HW-layer entry point for a client AHB vote update.
+ *
+ * Validates the handle, requires the client to be started, and forwards
+ * a local copy of the vote to cam_cpas_util_apply_client_ahb_vote()
+ * under hw_mutex + the client's mutex.
+ *
+ * Note: a zero-level vote is coerced to an absolute SVS vote - started
+ * clients always keep a minimum AHB vote.
+ */
+static int cam_cpas_hw_update_ahb_vote(struct cam_hw_info *cpas_hw,
+ uint32_t client_handle, struct cam_ahb_vote *client_ahb_vote)
+{
+ struct cam_ahb_vote ahb_vote;
+ struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_client *cpas_client = NULL;
+ uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+ int rc = 0;
+
+ if (!client_ahb_vote) {
+ CAM_ERR(CAM_CPAS, "Invalid input arg");
+ return -EINVAL;
+ }
+
+ /* Work on a copy; the caller's vote struct is never modified */
+ ahb_vote = *client_ahb_vote;
+
+ if (ahb_vote.vote.level == 0) {
+ CAM_DBG(CAM_CPAS, "0 ahb vote from client %d",
+ client_handle);
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ }
+
+ if (!CAM_CPAS_CLIENT_VALID(client_indx))
+ return -EINVAL;
+
+ mutex_lock(&cpas_hw->hw_mutex);
+ mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
+
+ if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+ CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
+ rc = -EPERM;
+ goto unlock_client;
+ }
+
+ CAM_DBG(CAM_PERF,
+ "client=[%d][%s][%d] : type[%d], level[%d], freq[%ld], applied[%d]",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, ahb_vote.type,
+ ahb_vote.vote.level, ahb_vote.vote.freq,
+ cpas_core->cpas_client[client_indx]->ahb_level);
+
+ rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
+ cpas_core->cpas_client[client_indx], &ahb_vote, NULL);
+
+unlock_client:
+ mutex_unlock(&cpas_core->client_mutex[client_indx]);
+ mutex_unlock(&cpas_hw->hw_mutex);
+ return rc;
+}
+
+/*
+ * HW ops .start: bring a registered client into the started state.
+ *
+ * @hw_priv:    struct cam_hw_info pointer
+ * @start_args: struct cam_cpas_hw_cmd_start (handle + AHB/AXI votes)
+ * @arg_size:   must equal sizeof(struct cam_cpas_hw_cmd_start)
+ *
+ * Applies the client's initial AHB and AXI votes (both must be non-zero)
+ * and, for the FIRST streaming client only, enables soc resources at the
+ * applied AHB level and runs the hw-version power_on hook. On power_on
+ * failure the resources are disabled again and irq_count is rolled back.
+ *
+ * Returns 0 on success; -EINVAL for bad args/votes/handle; -EPERM for an
+ * unregistered or already-started client; or the vote/enable errno.
+ */
+static int cam_cpas_hw_start(void *hw_priv, void *start_args,
+ uint32_t arg_size)
+{
+ struct cam_hw_info *cpas_hw;
+ struct cam_cpas *cpas_core;
+ uint32_t client_indx;
+ struct cam_cpas_hw_cmd_start *cmd_hw_start;
+ struct cam_cpas_client *cpas_client;
+ struct cam_ahb_vote *ahb_vote;
+ struct cam_axi_vote *axi_vote;
+ enum cam_vote_level applied_level = CAM_SVS_VOTE;
+ int rc;
+ struct cam_cpas_private_soc *soc_private = NULL;
+
+ if (!hw_priv || !start_args) {
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, start_args);
+ return -EINVAL;
+ }
+
+ if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
+ sizeof(struct cam_cpas_hw_cmd_start), arg_size);
+ return -EINVAL;
+ }
+
+ cpas_hw = (struct cam_hw_info *)hw_priv;
+ cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ soc_private = (struct cam_cpas_private_soc *)
+ cpas_hw->soc_info.soc_private;
+ cmd_hw_start = (struct cam_cpas_hw_cmd_start *)start_args;
+ client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_start->client_handle);
+ ahb_vote = cmd_hw_start->ahb_vote;
+ axi_vote = cmd_hw_start->axi_vote;
+
+ if (!ahb_vote || !axi_vote)
+ return -EINVAL;
+
+ /* Starting requires real votes: non-zero AHB level and some AXI bw */
+ if ((ahb_vote->vote.level == 0) || ((axi_vote->compressed_bw == 0) &&
+ (axi_vote->uncompressed_bw == 0))) {
+ CAM_ERR(CAM_CPAS, "Invalid vote ahb[%d], axi[%llu], [%llu]",
+ ahb_vote->vote.level, axi_vote->compressed_bw,
+ axi_vote->uncompressed_bw);
+ return -EINVAL;
+ }
+
+ if (!CAM_CPAS_CLIENT_VALID(client_indx))
+ return -EINVAL;
+
+ mutex_lock(&cpas_hw->hw_mutex);
+ mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
+
+ if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+ CAM_ERR(CAM_CPAS, "client=[%d] is not registered",
+ client_indx);
+ rc = -EPERM;
+ goto done;
+ }
+
+ if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+ CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] is in start state",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
+ rc = -EPERM;
+ goto done;
+ }
+
+ CAM_DBG(CAM_CPAS,
+ "AHB :client=[%d][%s][%d] type[%d], level[%d], applied[%d]",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index,
+ ahb_vote->type, ahb_vote->vote.level, cpas_client->ahb_level);
+ rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
+ ahb_vote, &applied_level);
+ if (rc)
+ goto done;
+
+ CAM_DBG(CAM_CPAS,
+ "AXI client=[%d][%s][%d] compressed_bw[%llu], uncompressed_bw[%llu]",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, axi_vote->compressed_bw,
+ axi_vote->uncompressed_bw);
+ rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
+ cpas_client, axi_vote);
+ if (rc)
+ goto done;
+
+ /* First streaming client powers the hardware up */
+ if (cpas_core->streamon_clients == 0) {
+ /* irq_count starts at 1 so IRQs fired during enable are
+ * accounted for; stop's drain waits for it to reach zero.
+ */
+ atomic_set(&cpas_core->irq_count, 1);
+ rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info,
+ applied_level);
+ if (rc) {
+ atomic_set(&cpas_core->irq_count, 0);
+ CAM_ERR(CAM_CPAS, "enable_resorce failed, rc=%d", rc);
+ goto done;
+ }
+
+ if (cpas_core->internal_ops.power_on) {
+ rc = cpas_core->internal_ops.power_on(cpas_hw);
+ if (rc) {
+ /* Roll back the enable on power_on failure */
+ atomic_set(&cpas_core->irq_count, 0);
+ cam_cpas_soc_disable_resources(
+ &cpas_hw->soc_info, true, true);
+ CAM_ERR(CAM_CPAS,
+ "failed in power_on settings rc=%d",
+ rc);
+ goto done;
+ }
+ }
+ CAM_DBG(CAM_CPAS, "irq_count=%d\n",
+ atomic_read(&cpas_core->irq_count));
+ cpas_hw->hw_state = CAM_HW_STATE_POWER_UP;
+ }
+
+ cpas_client->started = true;
+ cpas_core->streamon_clients++;
+
+ CAM_DBG(CAM_CPAS, "client=[%d][%s][%d] streamon_clients=%d",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, cpas_core->streamon_clients);
+done:
+ mutex_unlock(&cpas_core->client_mutex[client_indx]);
+ mutex_unlock(&cpas_hw->hw_mutex);
+ return rc;
+}
+
+/* wait_event_timeout() condition: true once all in-flight IRQs drained. */
+static int _check_irq_count(struct cam_cpas *cpas_core)
+{
+ if (atomic_read(&cpas_core->irq_count) > 0)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * HW ops .stop: take a started client out of the streaming state.
+ *
+ * @hw_priv:   struct cam_hw_info pointer
+ * @stop_args: struct cam_cpas_hw_cmd_stop (client handle)
+ * @arg_size:  must equal sizeof(struct cam_cpas_hw_cmd_stop)
+ *
+ * For the LAST streaming client: runs the hw-version power_off hook
+ * (best-effort), disables the IRQ, waits up to 1s for in-flight IRQ
+ * handlers to drain via irq_count, then disables soc resources. In all
+ * cases the client's AHB vote is dropped to SUSPEND and its AXI vote to
+ * zero.
+ */
+static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
+ uint32_t arg_size)
+{
+ struct cam_hw_info *cpas_hw;
+ struct cam_cpas *cpas_core;
+ uint32_t client_indx;
+ struct cam_cpas_hw_cmd_stop *cmd_hw_stop;
+ struct cam_cpas_client *cpas_client;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+ struct cam_cpas_private_soc *soc_private = NULL;
+ int rc = 0;
+ long result;
+
+ if (!hw_priv || !stop_args) {
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, stop_args);
+ return -EINVAL;
+ }
+
+ if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
+ sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
+ return -EINVAL;
+ }
+
+ cpas_hw = (struct cam_hw_info *)hw_priv;
+ cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ soc_private = (struct cam_cpas_private_soc *)
+ cpas_hw->soc_info.soc_private;
+ cmd_hw_stop = (struct cam_cpas_hw_cmd_stop *)stop_args;
+ client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_stop->client_handle);
+
+ if (!CAM_CPAS_CLIENT_VALID(client_indx))
+ return -EINVAL;
+
+ mutex_lock(&cpas_hw->hw_mutex);
+ mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
+
+ /* NOTE(review): cpas_client is dereferenced in this log (and in the
+ * error log below) before any NULL check - it is NULL if the client
+ * was never registered. Presumably callers only stop registered
+ * clients, but this should be guarded; confirm.
+ */
+ CAM_DBG(CAM_CPAS, "Client=[%d][%s][%d] streamon_clients=%d",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, cpas_core->streamon_clients);
+
+ if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+ CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not started",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
+ rc = -EPERM;
+ goto done;
+ }
+
+ cpas_client->started = false;
+ cpas_core->streamon_clients--;
+
+ /* Last streaming client powers the hardware down */
+ if (cpas_core->streamon_clients == 0) {
+ if (cpas_core->internal_ops.power_off) {
+ rc = cpas_core->internal_ops.power_off(cpas_hw);
+ if (rc) {
+ CAM_ERR(CAM_CPAS,
+ "failed in power_off settings rc=%d",
+ rc);
+ /* Do not return error, passthrough */
+ }
+ }
+
+ rc = cam_cpas_soc_disable_irq(&cpas_hw->soc_info);
+ if (rc) {
+ CAM_ERR(CAM_CPAS, "disable_irq failed, rc=%d", rc);
+ goto done;
+ }
+
+ /* Wait for any IRQs still being handled */
+ atomic_dec(&cpas_core->irq_count);
+ result = wait_event_timeout(cpas_core->irq_count_wq,
+ _check_irq_count(cpas_core), HZ);
+ if (result == 0) {
+ CAM_ERR(CAM_CPAS, "Wait failed: irq_count=%d",
+ atomic_read(&cpas_core->irq_count));
+ }
+
+ rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info,
+ true, false);
+ if (rc) {
+ CAM_ERR(CAM_CPAS, "disable_resorce failed, rc=%d", rc);
+ goto done;
+ }
+ CAM_DBG(CAM_CPAS, "Disabled all the resources: irq_count=%d\n",
+ atomic_read(&cpas_core->irq_count));
+ cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+ }
+
+ /* Drop this client's AHB vote to suspend level */
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SUSPEND_VOTE;
+ rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
+ &ahb_vote, NULL);
+ if (rc)
+ goto done;
+
+ /* And remove its AXI bandwidth vote entirely */
+ axi_vote.uncompressed_bw = 0;
+ axi_vote.compressed_bw = 0;
+ rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
+ cpas_client, &axi_vote);
+
+done:
+ mutex_unlock(&cpas_core->client_mutex[client_indx]);
+ mutex_unlock(&cpas_hw->hw_mutex);
+ return rc;
+}
+
+/*
+ * HW ops .init: validate arguments and forward to the hw-version
+ * specific init hook when the internal ops provide one.
+ *
+ * Returns 0 when no hook is installed, otherwise the hook's result;
+ * -EINVAL on bad arguments or size mismatch.
+ */
+static int cam_cpas_hw_init(void *hw_priv, void *init_hw_args,
+ uint32_t arg_size)
+{
+ struct cam_hw_info *hw_info;
+ struct cam_cpas *core;
+
+ if (!hw_priv || !init_hw_args) {
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, init_hw_args);
+ return -EINVAL;
+ }
+
+ if (arg_size != sizeof(struct cam_cpas_hw_caps)) {
+ CAM_ERR(CAM_CPAS, "INIT HW size mismatch %zd %d",
+ sizeof(struct cam_cpas_hw_caps), arg_size);
+ return -EINVAL;
+ }
+
+ hw_info = (struct cam_hw_info *)hw_priv;
+ core = (struct cam_cpas *)hw_info->core_info;
+
+ if (!core->internal_ops.init_hw_version)
+ return 0;
+
+ return core->internal_ops.init_hw_version(hw_info,
+ (struct cam_cpas_hw_caps *)init_hw_args);
+}
+
+/*
+ * Register a new CPAS client, identified by name (suffixed with the cell
+ * index when the soc is client-id based), and bind it to its AXI port.
+ *
+ * Fixes:
+ * - validate the name-lookup result BEFORE locking
+ *   client_mutex[client_indx]; client_indx can be -1/out of range when
+ *   the lookup fails, and the old code indexed the mutex array with it.
+ * - log register_params on axi-port-insert failure instead of the
+ *   not-yet-populated cpas_client->data (memcpy happens later).
+ *
+ * Returns 0 on success, -EPERM for unknown/duplicate clients, -ENOMEM
+ * on allocation failure, -EINVAL on axi port insertion failure.
+ */
+static int cam_cpas_hw_register_client(struct cam_hw_info *cpas_hw,
+ struct cam_cpas_register_params *register_params)
+{
+ int rc;
+ struct cam_cpas_client *cpas_client;
+ char client_name[CAM_HW_IDENTIFIER_LENGTH + 3];
+ int32_t client_indx = -1;
+ struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+ struct cam_cpas_private_soc *soc_private =
+ (struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+ CAM_DBG(CAM_CPAS, "Register params : identifier=%s, cell_index=%d",
+ register_params->identifier, register_params->cell_index);
+
+ if (soc_private->client_id_based)
+ snprintf(client_name, sizeof(client_name), "%s%d",
+ register_params->identifier,
+ register_params->cell_index);
+ else
+ snprintf(client_name, sizeof(client_name), "%s",
+ register_params->identifier);
+
+ mutex_lock(&cpas_hw->hw_mutex);
+
+ rc = cam_common_util_get_string_index(soc_private->client_name,
+ soc_private->num_clients, client_name, &client_indx);
+
+ /* Check the index before using it to index client_mutex[] */
+ if (rc || !CAM_CPAS_CLIENT_VALID(client_indx)) {
+ CAM_ERR(CAM_CPAS,
+ "Invalid client %s %d : valid=%d indx=%d rc=%d",
+ register_params->identifier,
+ register_params->cell_index,
+ CAM_CPAS_CLIENT_VALID(client_indx),
+ client_indx, rc);
+ mutex_unlock(&cpas_hw->hw_mutex);
+ return -EPERM;
+ }
+
+ mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+ if (CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+ CAM_ERR(CAM_CPAS,
+ "Client already registered %s %d : %pK",
+ register_params->identifier,
+ register_params->cell_index,
+ cpas_core->cpas_client[client_indx]);
+ mutex_unlock(&cpas_core->client_mutex[client_indx]);
+ mutex_unlock(&cpas_hw->hw_mutex);
+ return -EPERM;
+ }
+
+ cpas_client = kzalloc(sizeof(struct cam_cpas_client), GFP_KERNEL);
+ if (!cpas_client) {
+ mutex_unlock(&cpas_core->client_mutex[client_indx]);
+ mutex_unlock(&cpas_hw->hw_mutex);
+ return -ENOMEM;
+ }
+
+ rc = cam_cpas_util_insert_client_to_axi_port(cpas_core, soc_private,
+ cpas_client, client_indx);
+ if (rc) {
+ /* cpas_client->data is not populated yet; log the params */
+ CAM_ERR(CAM_CPAS,
+ "axi_port_insert failed Client=[%d][%s][%d], rc=%d",
+ client_indx, register_params->identifier,
+ register_params->cell_index, rc);
+ kfree(cpas_client);
+ mutex_unlock(&cpas_core->client_mutex[client_indx]);
+ mutex_unlock(&cpas_hw->hw_mutex);
+ return -EINVAL;
+ }
+
+ register_params->client_handle =
+ CAM_CPAS_GET_CLIENT_HANDLE(client_indx);
+ memcpy(&cpas_client->data, register_params,
+ sizeof(struct cam_cpas_register_params));
+ cpas_core->cpas_client[client_indx] = cpas_client;
+ cpas_core->registered_clients++;
+
+ CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, cpas_core->registered_clients);
+
+ mutex_unlock(&cpas_core->client_mutex[client_indx]);
+ mutex_unlock(&cpas_hw->hw_mutex);
+
+ return 0;
+}
+
+/*
+ * Unregister a CPAS client: remove it from its AXI port's client list
+ * and release its slot. The client must be registered and stopped.
+ *
+ * Fix: the "not registered" error path dereferenced cpas_client, which
+ * is NULL exactly in that case (the slot is empty) - log only the index.
+ *
+ * Returns 0 on success, -EINVAL for a bad handle, -EPERM when the
+ * client is not registered or is still started.
+ */
+static int cam_cpas_hw_unregister_client(struct cam_hw_info *cpas_hw,
+ uint32_t client_handle)
+{
+ struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+ struct cam_cpas_client *cpas_client = NULL;
+ uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+ int rc = 0;
+
+ if (!CAM_CPAS_CLIENT_VALID(client_indx))
+ return -EINVAL;
+
+ mutex_lock(&cpas_hw->hw_mutex);
+ mutex_lock(&cpas_core->client_mutex[client_indx]);
+ cpas_client = cpas_core->cpas_client[client_indx];
+
+ if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+ /* cpas_client is NULL here; do not dereference it */
+ CAM_ERR(CAM_CPAS, "Client=[%d] not registered", client_indx);
+ rc = -EPERM;
+ goto done;
+ }
+
+ if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+ CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not stopped",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index);
+ rc = -EPERM;
+ goto done;
+ }
+
+ cam_cpas_util_remove_client_from_axi_port(
+ cpas_core->cpas_client[client_indx]);
+
+ CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+ client_indx, cpas_client->data.identifier,
+ cpas_client->data.cell_index, cpas_core->registered_clients);
+
+ kfree(cpas_core->cpas_client[client_indx]);
+ cpas_core->cpas_client[client_indx] = NULL;
+ cpas_core->registered_clients--;
+done:
+ mutex_unlock(&cpas_core->client_mutex[client_indx]);
+ mutex_unlock(&cpas_hw->hw_mutex);
+ return rc;
+}
+
+/*
+ * HW ops .get_hw_caps: copy the cached hardware capabilities into the
+ * caller-provided struct cam_cpas_hw_caps.
+ */
+static int cam_cpas_hw_get_hw_info(void *hw_priv,
+ void *get_hw_cap_args, uint32_t arg_size)
+{
+ struct cam_hw_info *hw_info;
+ struct cam_cpas *core;
+ struct cam_cpas_hw_caps *caps_out;
+
+ if (!hw_priv || !get_hw_cap_args) {
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, get_hw_cap_args);
+ return -EINVAL;
+ }
+
+ if (arg_size != sizeof(struct cam_cpas_hw_caps)) {
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
+ sizeof(struct cam_cpas_hw_caps), arg_size);
+ return -EINVAL;
+ }
+
+ hw_info = (struct cam_hw_info *)hw_priv;
+ core = (struct cam_cpas *) hw_info->core_info;
+ caps_out = (struct cam_cpas_hw_caps *)get_hw_cap_args;
+
+ *caps_out = core->hw_caps;
+
+ return 0;
+}
+
+
+/*
+ * HW ops .process_cmd: dispatch CPAS commands to their handlers.
+ *
+ * Each case validates that arg_size matches the command's payload struct
+ * before casting cmd_args; a mismatch falls through with rc = -EINVAL.
+ */
+static int cam_cpas_hw_process_cmd(void *hw_priv,
+ uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+ int rc = -EINVAL;
+
+ if (!hw_priv || !cmd_args ||
+ (cmd_type >= CAM_CPAS_HW_CMD_INVALID)) {
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK %d",
+ hw_priv, cmd_args, cmd_type);
+ return -EINVAL;
+ }
+
+ switch (cmd_type) {
+ /* Client lifecycle */
+ case CAM_CPAS_HW_CMD_REGISTER_CLIENT: {
+ struct cam_cpas_register_params *register_params;
+
+ if (sizeof(struct cam_cpas_register_params) != arg_size) {
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+ cmd_type, arg_size);
+ break;
+ }
+
+ register_params = (struct cam_cpas_register_params *)cmd_args;
+ rc = cam_cpas_hw_register_client(hw_priv, register_params);
+ break;
+ }
+ case CAM_CPAS_HW_CMD_UNREGISTER_CLIENT: {
+ uint32_t *client_handle;
+
+ if (sizeof(uint32_t) != arg_size) {
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+ cmd_type, arg_size);
+ break;
+ }
+
+ client_handle = (uint32_t *)cmd_args;
+ rc = cam_cpas_hw_unregister_client(hw_priv, *client_handle);
+ break;
+ }
+ /* Register access on behalf of a started client */
+ case CAM_CPAS_HW_CMD_REG_WRITE: {
+ struct cam_cpas_hw_cmd_reg_read_write *reg_write;
+
+ if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+ arg_size) {
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+ cmd_type, arg_size);
+ break;
+ }
+
+ reg_write =
+ (struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+ rc = cam_cpas_hw_reg_write(hw_priv, reg_write->client_handle,
+ reg_write->reg_base, reg_write->offset, reg_write->mb,
+ reg_write->value);
+ break;
+ }
+ case CAM_CPAS_HW_CMD_REG_READ: {
+ struct cam_cpas_hw_cmd_reg_read_write *reg_read;
+
+ if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+ arg_size) {
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+ cmd_type, arg_size);
+ break;
+ }
+
+ reg_read =
+ (struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+ rc = cam_cpas_hw_reg_read(hw_priv,
+ reg_read->client_handle, reg_read->reg_base,
+ reg_read->offset, reg_read->mb, &reg_read->value);
+
+ break;
+ }
+ /* Bandwidth / level voting */
+ case CAM_CPAS_HW_CMD_AHB_VOTE: {
+ struct cam_cpas_hw_cmd_ahb_vote *cmd_ahb_vote;
+
+ if (sizeof(struct cam_cpas_hw_cmd_ahb_vote) != arg_size) {
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+ cmd_type, arg_size);
+ break;
+ }
+
+ cmd_ahb_vote = (struct cam_cpas_hw_cmd_ahb_vote *)cmd_args;
+ rc = cam_cpas_hw_update_ahb_vote(hw_priv,
+ cmd_ahb_vote->client_handle, cmd_ahb_vote->ahb_vote);
+ break;
+ }
+ case CAM_CPAS_HW_CMD_AXI_VOTE: {
+ struct cam_cpas_hw_cmd_axi_vote *cmd_axi_vote;
+
+ if (sizeof(struct cam_cpas_hw_cmd_axi_vote) != arg_size) {
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+ cmd_type, arg_size);
+ break;
+ }
+
+ cmd_axi_vote = (struct cam_cpas_hw_cmd_axi_vote *)cmd_args;
+ rc = cam_cpas_hw_update_axi_vote(hw_priv,
+ cmd_axi_vote->client_handle, cmd_axi_vote->axi_vote);
+ break;
+ }
+ default:
+ CAM_ERR(CAM_CPAS, "CPAS HW command not valid =%d", cmd_type);
+ break;
+ }
+
+ return rc;
+}
+
+/* Initialize every client slot: clear the pointer, init its mutex. */
+static int cam_cpas_util_client_setup(struct cam_hw_info *cpas_hw)
+{
+ struct cam_cpas *core = (struct cam_cpas *) cpas_hw->core_info;
+ int idx;
+
+ for (idx = 0; idx < CAM_CPAS_MAX_CLIENTS; idx++) {
+ core->cpas_client[idx] = NULL;
+ mutex_init(&core->client_mutex[idx]);
+ }
+
+ return 0;
+}
+
+/*
+ * Tear down every client slot: unregister any remaining client and
+ * destroy the per-slot mutex.
+ */
+static int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw)
+{
+ struct cam_cpas *core = (struct cam_cpas *) cpas_hw->core_info;
+ int idx;
+
+ for (idx = 0; idx < CAM_CPAS_MAX_CLIENTS; idx++) {
+ if (core->cpas_client[idx] != NULL) {
+ cam_cpas_hw_unregister_client(cpas_hw, idx);
+ core->cpas_client[idx] = NULL;
+ }
+ mutex_destroy(&core->client_mutex[idx]);
+ }
+
+ return 0;
+}
+
+/*
+ * Pick the internal-ops implementation based on the DT "arch-compat"
+ * string: "camss_top" selects camsstop, "cpas_top" selects cpastop.
+ * Also records the matching hw_type in @hw_intf.
+ */
+static int cam_cpas_util_get_internal_ops(struct platform_device *pdev,
+ struct cam_hw_intf *hw_intf, struct cam_cpas_internal_ops *internal_ops)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ const char *compat = NULL;
+ int rc;
+
+ rc = of_property_read_string_index(of_node, "arch-compat", 0,
+ (const char **)&compat);
+ if (rc) {
+ CAM_ERR(CAM_CPAS, "failed to get arch-compat rc=%d", rc);
+ return -EINVAL;
+ }
+
+ if (strnstr(compat, "camss_top", strlen(compat))) {
+ hw_intf->hw_type = CAM_HW_CAMSSTOP;
+ return cam_camsstop_get_internal_ops(internal_ops);
+ }
+
+ if (strnstr(compat, "cpas_top", strlen(compat))) {
+ hw_intf->hw_type = CAM_HW_CPASTOP;
+ return cam_cpastop_get_internal_ops(internal_ops);
+ }
+
+ CAM_ERR(CAM_CPAS, "arch-compat %s not supported", compat);
+ return -EINVAL;
+}
+
+/*
+ * cam_cpas_hw_probe() - allocate and bring up the CPAS HW layer.
+ *
+ * Allocates the hw interface/info/core triplet, wires hw_ops, creates
+ * the work queue, resolves internal ops from DT, initializes SoC
+ * resources, registers AHB/AXI bus clients, briefly powers the block to
+ * read HW capabilities, then powers back down. On success *hw_intf
+ * holds the new interface; on failure everything is unwound via the
+ * goto ladder below and an error code is returned.
+ */
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_info *cpas_hw = NULL;
+	struct cam_hw_intf *cpas_hw_intf = NULL;
+	struct cam_cpas *cpas_core = NULL;
+	struct cam_cpas_private_soc *soc_private;
+	struct cam_cpas_internal_ops *internal_ops;
+
+	cpas_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cpas_hw_intf)
+		return -ENOMEM;
+
+	cpas_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cpas_hw) {
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	cpas_core = kzalloc(sizeof(struct cam_cpas), GFP_KERNEL);
+	if (!cpas_core) {
+		kfree(cpas_hw);
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	/* -1 marks a regbase slot as "not mapped yet" */
+	for (i = 0; i < CAM_CPAS_REG_MAX; i++)
+		cpas_core->regbase_index[i] = -1;
+
+	cpas_hw_intf->hw_priv = cpas_hw;
+	cpas_hw->core_info = cpas_core;
+
+	cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cpas_hw->soc_info.pdev = pdev;
+	cpas_hw->soc_info.dev = &pdev->dev;
+	cpas_hw->soc_info.dev_name = pdev->name;
+	cpas_hw->open_count = 0;
+	mutex_init(&cpas_hw->hw_mutex);
+	spin_lock_init(&cpas_hw->hw_lock);
+	init_completion(&cpas_hw->hw_complete);
+
+	cpas_hw_intf->hw_ops.get_hw_caps = cam_cpas_hw_get_hw_info;
+	cpas_hw_intf->hw_ops.init = cam_cpas_hw_init;
+	cpas_hw_intf->hw_ops.deinit = NULL;
+	cpas_hw_intf->hw_ops.reset = NULL;
+	cpas_hw_intf->hw_ops.reserve = NULL;
+	cpas_hw_intf->hw_ops.release = NULL;
+	cpas_hw_intf->hw_ops.start = cam_cpas_hw_start;
+	cpas_hw_intf->hw_ops.stop = cam_cpas_hw_stop;
+	cpas_hw_intf->hw_ops.read = NULL;
+	cpas_hw_intf->hw_ops.write = NULL;
+	cpas_hw_intf->hw_ops.process_cmd = cam_cpas_hw_process_cmd;
+
+	cpas_core->work_queue = alloc_workqueue("cam-cpas",
+		WQ_UNBOUND | WQ_MEM_RECLAIM, CAM_CPAS_INFLIGHT_WORKS);
+	if (!cpas_core->work_queue) {
+		rc = -ENOMEM;
+		goto release_mem;
+	}
+
+	internal_ops = &cpas_core->internal_ops;
+	rc = cam_cpas_util_get_internal_ops(pdev, cpas_hw_intf, internal_ops);
+	if (rc)
+		goto release_workq;
+
+	rc = cam_cpas_soc_init_resources(&cpas_hw->soc_info,
+		internal_ops->handle_irq, cpas_hw);
+	if (rc)
+		goto release_workq;
+
+	soc_private = (struct cam_cpas_private_soc *)
+		cpas_hw->soc_info.soc_private;
+	cpas_core->num_clients = soc_private->num_clients;
+	atomic_set(&cpas_core->irq_count, 0);
+	init_waitqueue_head(&cpas_core->irq_count_wq);
+
+	if (internal_ops->setup_regbase) {
+		rc = internal_ops->setup_regbase(&cpas_hw->soc_info,
+			cpas_core->regbase_index, CAM_CPAS_REG_MAX);
+		if (rc)
+			goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_client_setup(cpas_hw);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in client setup, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_register_bus_client(&cpas_hw->soc_info,
+		cpas_hw->soc_info.pdev->dev.of_node,
+		&cpas_core->ahb_bus_client);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in ahb setup, rc=%d", rc);
+		goto client_cleanup;
+	}
+
+	rc = cam_cpas_util_axi_setup(cpas_core, &cpas_hw->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in axi setup, rc=%d", rc);
+		goto ahb_cleanup;
+	}
+
+	/* Need to vote first before enabling clocks */
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, true);
+	if (rc)
+		goto axi_cleanup;
+
+	rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, CAM_SVS_VOTE);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in soc_enable_resources, rc=%d", rc);
+		goto remove_default_vote;
+	}
+
+	if (internal_ops->get_hw_info) {
+		rc = internal_ops->get_hw_info(cpas_hw, &cpas_core->hw_caps);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "failed in get_hw_info, rc=%d", rc);
+			goto disable_soc_res;
+		}
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid get_hw_info");
+		/*
+		 * Fix: rc is still 0 here (the previous call succeeded), so
+		 * without setting an error the probe would unwind through the
+		 * cleanup ladder yet report success to the caller.
+		 */
+		rc = -EINVAL;
+		goto disable_soc_res;
+	}
+
+	rc = cam_cpas_hw_init(cpas_hw_intf->hw_priv,
+		&cpas_core->hw_caps, sizeof(struct cam_cpas_hw_caps));
+	if (rc)
+		goto disable_soc_res;
+
+	rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc);
+		goto remove_default_vote;
+	}
+
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+	if (rc)
+		goto axi_cleanup;
+
+	*hw_intf = cpas_hw_intf;
+	return 0;
+
+disable_soc_res:
+	cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true);
+remove_default_vote:
+	cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+ahb_cleanup:
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+client_cleanup:
+	cam_cpas_util_client_cleanup(cpas_hw);
+deinit_platform_res:
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+release_workq:
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
+release_mem:
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+	CAM_ERR(CAM_CPAS, "failed in hw probe");
+	return rc;
+}
+
+/*
+ * cam_cpas_hw_remove() - tear down everything cam_cpas_hw_probe() set up.
+ *
+ * Refuses to run while the HW is still powered up (callers must stop
+ * all clients first). Cleanup order is the reverse of probe: AXI, AHB
+ * bus client, clients, SoC resources, work queue, then the three
+ * allocations. Returns 0 on success, -EINVAL on NULL intf or if HW is
+ * still powered.
+ */
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+
+	if (!cpas_hw_intf) {
+		CAM_ERR(CAM_CPAS, "cpas interface not initialized");
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)cpas_hw_intf->hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_CPAS, "cpas hw is in power up state");
+		return -EINVAL;
+	}
+
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+	cam_cpas_util_client_cleanup(cpas_hw);
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw.h
new file mode 100644
index 000000000000..f93d036ecfcf
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw.h
@@ -0,0 +1,208 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_HW_H_
+#define _CAM_CPAS_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_common_util.h"
+
+#define CAM_CPAS_MAX_CLIENTS 30
+#define CAM_CPAS_INFLIGHT_WORKS 5
+
+#define CAM_CPAS_AXI_MIN_MNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_MNOC_IB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000UL)
+
+#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
+#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
+
+/*
+ * Client-index validity helpers. Arguments are fully parenthesized so
+ * expression arguments (e.g. "a - b") expand with the intended
+ * precedence; the unparenthesized originals would mis-evaluate them.
+ */
+#define CAM_CPAS_CLIENT_VALID(indx) \
+	(((indx) >= 0) && ((indx) < CAM_CPAS_MAX_CLIENTS))
+#define CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx) \
+	((CAM_CPAS_CLIENT_VALID(indx)) && \
+	((cpas_core)->cpas_client[(indx)]))
+#define CAM_CPAS_CLIENT_STARTED(cpas_core, indx) \
+	((CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)) && \
+	((cpas_core)->cpas_client[(indx)]->started))
+
+/**
+ * enum cam_cpas_access_type - Enum for Register access type
+ */
+enum cam_cpas_access_type {
+ CAM_REG_TYPE_READ,
+ CAM_REG_TYPE_WRITE,
+ CAM_REG_TYPE_READ_WRITE,
+};
+
+/**
+ * struct cam_cpas_internal_ops - CPAS Hardware layer internal ops
+ *
+ * @get_hw_info: Function pointer for get hw info
+ * @init_hw_version: Function pointer for hw init based on version
+ * @handle_irq: Function pointer for irq handling
+ * @setup_regbase: Function pointer for setup rebase indices
+ * @power_on: Function pointer for hw core specific power on settings
+ * @power_off: Function pointer for hw core specific power off settings
+ *
+ */
+struct cam_cpas_internal_ops {
+ int (*get_hw_info)(struct cam_hw_info *cpas_hw,
+ struct cam_cpas_hw_caps *hw_caps);
+ int (*init_hw_version)(struct cam_hw_info *cpas_hw,
+ struct cam_cpas_hw_caps *hw_caps);
+ irqreturn_t (*handle_irq)(int irq_num, void *data);
+ int (*setup_regbase)(struct cam_hw_soc_info *soc_info,
+ int32_t regbase_index[], int32_t num_reg_map);
+ int (*power_on)(struct cam_hw_info *cpas_hw);
+ int (*power_off)(struct cam_hw_info *cpas_hw);
+};
+
+/**
+ * struct cam_cpas_reg : CPAS register info
+ *
+ * @enable: Whether this reg info need to be enabled
+ * @access_type: Register access type
+ * @masked_value: Whether this register write/read is based on mask, shift
+ * @offset: Register offset
+ * @mask: Mask for this register value
+ * @shift: Shift for this register value
+ * @value: Register value
+ *
+ */
+struct cam_cpas_reg {
+ bool enable;
+ enum cam_cpas_access_type access_type;
+ bool masked_value;
+ uint32_t offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t value;
+};
+
+/**
+ * struct cam_cpas_client : CPAS Client structure info
+ *
+ * @data: Client register params
+ * @started: Whether client has streamed on
+ * @ahb_level: Determined/Applied ahb level for the client
+ * @axi_vote: Determined/Applied axi vote for the client
+ * @axi_port: Client's parent axi port
+ * @axi_sibling_client: Client's siblings sharing the same axi port
+ *
+ */
+struct cam_cpas_client {
+ struct cam_cpas_register_params data;
+ bool started;
+ enum cam_vote_level ahb_level;
+ struct cam_axi_vote axi_vote;
+ struct cam_cpas_axi_port *axi_port;
+ struct list_head axi_sibling_client;
+};
+
+/**
+ * struct cam_cpas_bus_client : Bus client information
+ *
+ * @src: Bus master/src id
+ * @dst: Bus slave/dst id
+ * @pdata: Bus pdata information
+ * @client_id: Bus client id
+ * @num_usecases: Number of use cases for this client
+ * @num_paths: Number of paths for this client
+ * @curr_vote_level: current voted index
+ * @dyn_vote: Whether dynamic voting enabled
+ * @lock: Mutex lock used while voting on this client
+ * @valid: Whether bus client is valid
+ * @name: Name of the bus client
+ *
+ */
+struct cam_cpas_bus_client {
+ int src;
+ int dst;
+ struct msm_bus_scale_pdata *pdata;
+ uint32_t client_id;
+ int num_usecases;
+ int num_paths;
+ unsigned int curr_vote_level;
+ bool dyn_vote;
+ struct mutex lock;
+ bool valid;
+ const char *name;
+};
+
+/**
+ * struct cam_cpas_axi_port : AXI port information
+ *
+ * @sibling_port: Sibling AXI ports
+ * @clients_list_head: List head pointing to list of clients sharing this port
+ * @lock: Mutex lock for accessing this port
+ * @camnoc_bus: CAMNOC bus client info for this port
+ * @mnoc_bus: MNOC bus client info for this port
+ * @axi_port_name: Name of this AXI port
+ * @axi_port_node: Node representing this AXI Port
+ * @axi_port_mnoc_node: Node representing mnoc in this AXI Port
+ * @axi_port_camnoc_node: Node representing camnoc in this AXI Port
+ * @consolidated_axi_vote: Consolidated axi bw values for this AXI port
+ */
+struct cam_cpas_axi_port {
+ struct list_head sibling_port;
+ struct list_head clients_list_head;
+ struct mutex lock;
+ struct cam_cpas_bus_client camnoc_bus;
+ struct cam_cpas_bus_client mnoc_bus;
+ const char *axi_port_name;
+ struct device_node *axi_port_node;
+ struct device_node *axi_port_mnoc_node;
+ struct device_node *axi_port_camnoc_node;
+ struct cam_axi_vote consolidated_axi_vote;
+};
+
+/**
+ * struct cam_cpas : CPAS core data structure info
+ *
+ * @hw_caps: CPAS hw capabilities
+ * @cpas_client: Array of pointers to CPAS clients info
+ * @client_mutex: Mutex for accessing client info
+ * @num_clients: Total number of clients that CPAS supports
+ * @registered_clients: Number of Clients registered currently
+ * @streamon_clients: Number of Clients that are in start state currently
+ * @regbase_index: Register base indices for CPAS register base IDs
+ * @ahb_bus_client: AHB Bus client info
+ * @axi_ports_list_head: Head pointing to list of AXI ports
+ * @internal_ops: CPAS HW internal ops
+ * @work_queue: Work queue handle
+ *
+ */
+struct cam_cpas {
+ struct cam_cpas_hw_caps hw_caps;
+ struct cam_cpas_client *cpas_client[CAM_CPAS_MAX_CLIENTS];
+ struct mutex client_mutex[CAM_CPAS_MAX_CLIENTS];
+ uint32_t num_clients;
+ uint32_t registered_clients;
+ uint32_t streamon_clients;
+ int32_t regbase_index[CAM_CPAS_REG_MAX];
+ struct cam_cpas_bus_client ahb_bus_client;
+ struct list_head axi_ports_list_head;
+ struct cam_cpas_internal_ops internal_ops;
+ struct workqueue_struct *work_queue;
+ atomic_t irq_count;
+ wait_queue_head_t irq_count_wq;
+};
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+ enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
+
+#endif /* _CAM_CPAS_HW_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw_intf.h
new file mode 100644
index 000000000000..181659e41cec
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_hw_intf.h
@@ -0,0 +1,135 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_HW_INTF_H_
+#define _CAM_CPAS_HW_INTF_H_
+
+#include <linux/platform_device.h>
+
+#include "cam_cpas_api.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_debug_util.h"
+
+/* Number of times to retry while polling */
+#define CAM_CPAS_POLL_RETRY_CNT 5
+/* Minimum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MIN_USECS 200
+/* Maximum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MAX_USECS 250
+
+/**
+ * enum cam_cpas_hw_type - Enum for CPAS HW type
+ */
+enum cam_cpas_hw_type {
+ CAM_HW_CPASTOP,
+ CAM_HW_CAMSSTOP,
+};
+
+/**
+ * enum cam_cpas_hw_cmd_process - Enum for CPAS HW process command type
+ */
+enum cam_cpas_hw_cmd_process {
+ CAM_CPAS_HW_CMD_REGISTER_CLIENT,
+ CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+ CAM_CPAS_HW_CMD_REG_WRITE,
+ CAM_CPAS_HW_CMD_REG_READ,
+ CAM_CPAS_HW_CMD_AHB_VOTE,
+ CAM_CPAS_HW_CMD_AXI_VOTE,
+ CAM_CPAS_HW_CMD_INVALID,
+};
+
+/**
+ * struct cam_cpas_hw_cmd_reg_read_write : CPAS cmd struct for reg read, write
+ *
+ * @client_handle: Client handle
+ * @reg_base: Register base type
+ * @offset: Register offset
+ * @value: Register value
+ * @mb: Whether to do operation with memory barrier
+ *
+ */
+struct cam_cpas_hw_cmd_reg_read_write {
+ uint32_t client_handle;
+ enum cam_cpas_reg_base reg_base;
+ uint32_t offset;
+ uint32_t value;
+ bool mb;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_ahb_vote : CPAS cmd struct for AHB vote
+ *
+ * @client_handle: Client handle
+ * @ahb_vote: AHB voting info
+ *
+ */
+struct cam_cpas_hw_cmd_ahb_vote {
+ uint32_t client_handle;
+ struct cam_ahb_vote *ahb_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_axi_vote : CPAS cmd struct for AXI vote
+ *
+ * @client_handle: Client handle
+ * @axi_vote: axi bandwidth vote
+ *
+ */
+struct cam_cpas_hw_cmd_axi_vote {
+ uint32_t client_handle;
+ struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_start : CPAS cmd struct for start
+ *
+ * @client_handle: Client handle
+ *
+ */
+struct cam_cpas_hw_cmd_start {
+ uint32_t client_handle;
+ struct cam_ahb_vote *ahb_vote;
+ struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_stop : CPAS cmd struct for stop
+ *
+ * @client_handle: Client handle
+ *
+ */
+struct cam_cpas_hw_cmd_stop {
+ uint32_t client_handle;
+};
+
+/**
+ * struct cam_cpas_hw_caps : CPAS HW capabilities
+ *
+ * @camera_family: Camera family type
+ * @camera_version: Camera version
+ * @cpas_version: CPAS version
+ * @camera_capability: Camera hw capabilities
+ *
+ */
+struct cam_cpas_hw_caps {
+ uint32_t camera_family;
+ struct cam_hw_version camera_version;
+ struct cam_hw_version cpas_version;
+ uint32_t camera_capability;
+};
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+ struct cam_hw_intf **hw_intf);
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf);
+
+#endif /* _CAM_CPAS_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_intf.c
new file mode 100644
index 000000000000..a9f1e4f8364e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_intf.c
@@ -0,0 +1,662 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_cpas_hw_intf.h"
+
+#define CAM_CPAS_DEV_NAME "cam-cpas"
+#define CAM_CPAS_INTF_INITIALIZED() (g_cpas_intf && g_cpas_intf->probe_done)
+
+/**
+ * struct cam_cpas_intf : CPAS interface
+ *
+ * @pdev: Platform device
+ * @subdev: Subdev info
+ * @hw_intf: CPAS HW interface
+ * @hw_caps: CPAS HW capabilities
+ * @intf_lock: CPAS interface mutex
+ * @open_cnt: CPAS subdev open count
+ * @probe_done: Whether CPAS probe completed
+ *
+ */
+struct cam_cpas_intf {
+ struct platform_device *pdev;
+ struct cam_subdev subdev;
+ struct cam_hw_intf *hw_intf;
+ struct cam_cpas_hw_caps hw_caps;
+ struct mutex intf_lock;
+ uint32_t open_cnt;
+ bool probe_done;
+};
+
+static struct cam_cpas_intf *g_cpas_intf;
+
+/*
+ * cam_cpas_get_cpas_hw_version() - report the cached CPAS hw_version.
+ *
+ * Returns -ENODEV before probe completes, -EINVAL on NULL output
+ * pointer. Note: CAM_CPAS_TITAN_NONE is only logged at debug level and
+ * still returns 0 — callers must check the value themselves.
+ */
+int cam_cpas_get_cpas_hw_version(uint32_t *hw_version)
+{
+	struct cam_hw_info *cpas_hw = NULL;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!hw_version) {
+		CAM_ERR(CAM_CPAS, "invalid input %pK", hw_version);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv;
+
+	*hw_version = cpas_hw->soc_info.hw_version;
+
+	if (*hw_version == CAM_CPAS_TITAN_NONE) {
+		CAM_DBG(CAM_CPAS, "Didn't find a valid HW Version %d",
+			*hw_version);
+	}
+
+	return 0;
+}
+
+
+/*
+ * cam_cpas_get_hw_info() - copy out the capabilities cached at probe.
+ *
+ * All four output pointers are mandatory. Returns -ENODEV before probe
+ * completes, -EINVAL on any NULL argument, 0 otherwise.
+ */
+int cam_cpas_get_hw_info(uint32_t *camera_family,
+	struct cam_hw_version *camera_version,
+	struct cam_hw_version *cpas_version,
+	uint32_t *cam_caps)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!camera_family || !camera_version || !cpas_version || !cam_caps) {
+		CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK %pK",
+			camera_family, camera_version, cpas_version, cam_caps);
+		return -EINVAL;
+	}
+
+	*camera_family = g_cpas_intf->hw_caps.camera_family;
+	*camera_version = g_cpas_intf->hw_caps.camera_version;
+	*cpas_version = g_cpas_intf->hw_caps.cpas_version;
+	*cam_caps = g_cpas_intf->hw_caps.camera_capability;
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_cpas_get_hw_info);
+
+/*
+ * cam_cpas_reg_write() - write a register through the CPAS HW layer.
+ *
+ * Packs the arguments into a cam_cpas_hw_cmd_reg_read_write and
+ * forwards it as CAM_CPAS_HW_CMD_REG_WRITE via process_cmd. @mb
+ * requests the write be done with a memory barrier. Returns -ENODEV
+ * before probe, -EINVAL if process_cmd is missing, else process_cmd's
+ * result.
+ */
+int cam_cpas_reg_write(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_write;
+
+		cmd_reg_write.client_handle = client_handle;
+		cmd_reg_write.reg_base = reg_base;
+		cmd_reg_write.offset = offset;
+		cmd_reg_write.value = value;
+		cmd_reg_write.mb = mb;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_write);
+
+/*
+ * cam_cpas_reg_read() - read a register through the CPAS HW layer.
+ *
+ * Issues CAM_CPAS_HW_CMD_REG_READ via process_cmd; on success the read
+ * value is copied into *value. *value is left untouched on any failure.
+ * Returns -ENODEV before probe, -EINVAL on NULL @value or missing
+ * process_cmd, else process_cmd's result.
+ */
+int cam_cpas_reg_read(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t *value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!value) {
+		CAM_ERR(CAM_CPAS, "Invalid arg value");
+		return -EINVAL;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_read;
+
+		cmd_reg_read.client_handle = client_handle;
+		cmd_reg_read.reg_base = reg_base;
+		cmd_reg_read.offset = offset;
+		cmd_reg_read.mb = mb;
+		cmd_reg_read.value = 0;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+			return rc;
+		}
+
+		*value = cmd_reg_read.value;
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_read);
+
+/*
+ * cam_cpas_update_axi_vote() - apply a client's AXI bandwidth vote.
+ *
+ * Wraps the vote in CAM_CPAS_HW_CMD_AXI_VOTE and forwards it to
+ * process_cmd. Returns -ENODEV before probe, -EINVAL if process_cmd is
+ * missing, else process_cmd's result.
+ */
+int cam_cpas_update_axi_vote(uint32_t client_handle,
+	struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_axi_vote cmd_axi_vote;
+
+		cmd_axi_vote.client_handle = client_handle;
+		cmd_axi_vote.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote,
+			sizeof(struct cam_cpas_hw_cmd_axi_vote));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_axi_vote);
+
+/*
+ * cam_cpas_update_ahb_vote() - apply a client's AHB clock-level vote.
+ *
+ * Wraps the vote in CAM_CPAS_HW_CMD_AHB_VOTE and forwards it to
+ * process_cmd. Returns -ENODEV before probe, -EINVAL if process_cmd is
+ * missing, else process_cmd's result.
+ */
+int cam_cpas_update_ahb_vote(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_ahb_vote cmd_ahb_vote;
+
+		cmd_ahb_vote.client_handle = client_handle;
+		cmd_ahb_vote.ahb_vote = ahb_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote,
+			sizeof(struct cam_cpas_hw_cmd_ahb_vote));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_ahb_vote);
+
+/*
+ * cam_cpas_stop() - stream-off a registered client.
+ *
+ * Forwards the handle to the HW layer's stop op. Returns -ENODEV
+ * before probe, -EINVAL if the stop op is missing, else the op's
+ * result.
+ */
+int cam_cpas_stop(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.stop) {
+		struct cam_cpas_hw_cmd_stop cmd_hw_stop;
+
+		cmd_hw_stop.client_handle = client_handle;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.stop(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop,
+			sizeof(struct cam_cpas_hw_cmd_stop));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in stop, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid stop ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_stop);
+
+/*
+ * cam_cpas_start() - stream-on a registered client with initial votes.
+ *
+ * Bundles the client handle with its initial AHB and AXI votes and
+ * forwards them to the HW layer's start op. Returns -ENODEV before
+ * probe, -EINVAL if the start op is missing, else the op's result.
+ */
+int cam_cpas_start(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote, struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.start) {
+		struct cam_cpas_hw_cmd_start cmd_hw_start;
+
+		cmd_hw_start.client_handle = client_handle;
+		cmd_hw_start.ahb_vote = ahb_vote;
+		cmd_hw_start.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.start(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start,
+			sizeof(struct cam_cpas_hw_cmd_start));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in start, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid start ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_start);
+
+/*
+ * cam_cpas_unregister_client() - drop a client's CPAS registration.
+ *
+ * Issues CAM_CPAS_HW_CMD_UNREGISTER_CLIENT with the handle as payload.
+ * Returns -ENODEV before probe, -EINVAL if process_cmd is missing,
+ * else process_cmd's result.
+ */
+int cam_cpas_unregister_client(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+			&client_handle, sizeof(uint32_t));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_unregister_client);
+
+/*
+ * cam_cpas_register_client() - register a new CPAS client.
+ *
+ * Forwards @register_params as CAM_CPAS_HW_CMD_REGISTER_CLIENT; the HW
+ * layer fills in the client handle on success. Returns -ENODEV before
+ * probe, -EINVAL if process_cmd is missing, else process_cmd's result.
+ */
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params,
+			sizeof(struct cam_cpas_register_params));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_register_client);
+
+/*
+ * cam_cpas_subdev_cmd() - handle a VIDIOC_CAM_CONTROL op for CPAS.
+ *
+ * CAM_QUERY_CAP round-trips a cam_cpas_query_cap through user space,
+ * filling in the capabilities cached at probe. CAM_SD_SHUTDOWN is a
+ * no-op. Unknown op codes return -EINVAL.
+ */
+int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
+	struct cam_control *cmd)
+{
+	int rc = 0;
+
+	if (!cmd) {
+		CAM_ERR(CAM_CPAS, "Invalid input cmd");
+		return -EINVAL;
+	}
+
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_cpas_query_cap query;
+
+		/*
+		 * copy_from_user()/copy_to_user() return the number of bytes
+		 * NOT copied, not an errno. The original code returned that
+		 * positive byte count to the caller; map short copies to
+		 * -EFAULT instead.
+		 */
+		if (copy_from_user(&query, u64_to_user_ptr(cmd->handle),
+			sizeof(query))) {
+			rc = -EFAULT;
+			CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d",
+				rc);
+			break;
+		}
+
+		rc = cam_cpas_get_hw_info(&query.camera_family,
+			&query.camera_version, &query.cpas_version,
+			&query.reserved);
+		if (rc)
+			break;
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &query,
+			sizeof(query))) {
+			rc = -EFAULT;
+			CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc);
+		}
+
+		break;
+	}
+	case CAM_SD_SHUTDOWN:
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Unknown op code %d for CPAS", cmd->op_code);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cpas_subdev_open() - v4l2 subdev open hook; count the opener.
+ *
+ * Only bumps open_cnt under intf_lock; no HW work is done here.
+ * Returns -ENODEV if probe has not completed.
+ */
+static int cam_cpas_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt++;
+	CAM_DBG(CAM_CPAS, "CPAS Subdev open count %d", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+/*
+ * cam_cpas_subdev_close() - v4l2 subdev close hook; drop one opener.
+ *
+ * NOTE(review): open_cnt is decremented without an underflow guard;
+ * presumably the v4l2 core pairs open/close 1:1 — confirm before
+ * relying on the count.
+ */
+static int cam_cpas_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt--;
+	CAM_DBG(CAM_CPAS, "CPAS Subdev close count %d", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+/*
+ * cam_cpas_subdev_ioctl() - native ioctl entry; arg is already a
+ * kernel-space cam_control when cmd is VIDIOC_CAM_CONTROL.
+ */
+static long cam_cpas_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * cam_cpas_subdev_compat_ioctl() - 32-bit compat ioctl entry.
+ *
+ * Unlike the native path, @arg here is a user-space pointer: the
+ * cam_control is copied in, dispatched, and copied back out on
+ * success. Copy failures return -EFAULT.
+ */
+static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_CPAS, "Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	/* write back results (e.g. CAM_QUERY_CAP fills cmd_data) */
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_CPAS,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+
+static struct v4l2_subdev_core_ops cpas_subdev_core_ops = {
+ .ioctl = cam_cpas_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = cam_cpas_subdev_compat_ioctl,
+#endif
+};
+
+static const struct v4l2_subdev_ops cpas_subdev_ops = {
+ .core = &cpas_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cpas_subdev_intern_ops = {
+ .open = cam_cpas_subdev_open,
+ .close = cam_cpas_subdev_close,
+};
+
+/*
+ * cam_cpas_subdev_register() - publish the CPAS v4l2 subdev.
+ *
+ * Fills g_cpas_intf->subdev with the CPAS name/ops and registers it;
+ * on success the interface pointer is stored as platform drvdata.
+ * Returns -EINVAL if called before g_cpas_intf is allocated.
+ */
+static int cam_cpas_subdev_register(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_subdev *subdev;
+
+	if (!g_cpas_intf)
+		return -EINVAL;
+
+	subdev = &g_cpas_intf->subdev;
+
+	subdev->name = CAM_CPAS_DEV_NAME;
+	subdev->pdev = pdev;
+	subdev->ops = &cpas_subdev_ops;
+	subdev->internal_ops = &cpas_subdev_intern_ops;
+	subdev->token = g_cpas_intf;
+	subdev->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	subdev->ent_function = CAM_CPAS_DEVICE_TYPE;
+
+	rc = cam_register_subdev(subdev);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed register subdev: %s!",
+			CAM_CPAS_DEV_NAME);
+		return rc;
+	}
+
+	platform_set_drvdata(g_cpas_intf->pdev, g_cpas_intf);
+	return rc;
+}
+
+/*
+ * cam_cpas_dev_probe() - platform probe for the CPAS interface device.
+ *
+ * Allocates the singleton g_cpas_intf, probes the HW layer, caches the
+ * HW capabilities, and registers the v4l2 subdev. Returns -EALREADY on
+ * a second probe, -ENOMEM/-EINVAL/HW error codes on failure (all state
+ * is unwound and g_cpas_intf reset to NULL).
+ */
+static int cam_cpas_dev_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_hw_caps *hw_caps;
+	struct cam_hw_intf *hw_intf;
+	int rc;
+
+	if (g_cpas_intf) {
+		/* Fix: error message typo "proble" -> "probe" */
+		CAM_ERR(CAM_CPAS, "cpas dev probe already done");
+		return -EALREADY;
+	}
+
+	g_cpas_intf = kzalloc(sizeof(*g_cpas_intf), GFP_KERNEL);
+	if (!g_cpas_intf)
+		return -ENOMEM;
+
+	mutex_init(&g_cpas_intf->intf_lock);
+	g_cpas_intf->pdev = pdev;
+
+	rc = cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf);
+	if (rc || (g_cpas_intf->hw_intf == NULL)) {
+		CAM_ERR(CAM_CPAS, "Failed in hw probe, rc=%d", rc);
+		goto error_destroy_mem;
+	}
+
+	hw_intf = g_cpas_intf->hw_intf;
+	hw_caps = &g_cpas_intf->hw_caps;
+	if (hw_intf->hw_ops.get_hw_caps) {
+		rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
+			hw_caps, sizeof(struct cam_cpas_hw_caps));
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "Failed in get_hw_caps, rc=%d", rc);
+			goto error_hw_remove;
+		}
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid get_hw_caps ops");
+		/*
+		 * Fix: rc is 0 here (hw probe succeeded), so without setting
+		 * an error this path tore everything down yet reported probe
+		 * success to the driver core.
+		 */
+		rc = -EINVAL;
+		goto error_hw_remove;
+	}
+
+	rc = cam_cpas_subdev_register(pdev);
+	if (rc)
+		goto error_hw_remove;
+
+	g_cpas_intf->probe_done = true;
+	CAM_DBG(CAM_CPAS,
+		"CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	return rc;
+
+error_hw_remove:
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+error_destroy_mem:
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+	CAM_ERR(CAM_CPAS, "CPAS probe failed");
+	return rc;
+}
+
+/*
+ * cam_cpas_dev_remove() - platform remove; undo cam_cpas_dev_probe().
+ *
+ * Clears probe_done first (new entry points bail with -ENODEV), then
+ * unregisters the subdev, removes the HW layer, and frees the
+ * singleton.
+ */
+static int cam_cpas_dev_remove(struct platform_device *dev)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	mutex_lock(&g_cpas_intf->intf_lock);
+	g_cpas_intf->probe_done = false;
+	cam_unregister_subdev(&g_cpas_intf->subdev);
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+	mutex_unlock(&g_cpas_intf->intf_lock);
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id cam_cpas_dt_match[] = {
+ {.compatible = "qcom,cam-cpas"},
+ {}
+};
+
+static struct platform_driver cam_cpas_driver = {
+ .probe = cam_cpas_dev_probe,
+ .remove = cam_cpas_dev_remove,
+ .driver = {
+ .name = CAM_CPAS_DEV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cam_cpas_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_cpas_dev_init_module(void)
+{
+ return platform_driver_register(&cam_cpas_driver);
+}
+
+static void __exit cam_cpas_dev_exit_module(void)
+{
+ platform_driver_unregister(&cam_cpas_driver);
+}
+
+module_init(cam_cpas_dev_init_module);
+module_exit(cam_cpas_dev_exit_module);
+MODULE_DESCRIPTION("MSM CPAS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_soc.c
new file mode 100644
index 000000000000..7f2053cb23af
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_soc.c
@@ -0,0 +1,291 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+/**
+ * cam_cpas_get_custom_dt_info() - Parse CPAS-specific device-tree info.
+ * @pdev: platform device whose of_node carries the CPAS properties
+ * @soc_private: CPAS private soc struct to populate
+ *
+ * Reads the arch-compat string, camnoc minimum IB bandwidth, client
+ * names, per-client AXI port names, camnoc axi clock control settings
+ * and the optional vdd-corner to AHB level mapping table.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
+	struct cam_cpas_private_soc *soc_private)
+{
+	struct device_node *of_node;
+	int count = 0, i = 0, rc = 0;
+
+	if (!soc_private || !pdev) {
+		CAM_ERR(CAM_CPAS, "invalid input arg %pK %pK",
+			soc_private, pdev);
+		return -EINVAL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&soc_private->arch_compat);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "device %s failed to read arch-compat",
+			pdev->name);
+		return rc;
+	}
+
+	soc_private->camnoc_axi_min_ib_bw = 0;
+	rc = of_property_read_u64(of_node,
+		"camnoc-axi-min-ib-bw",
+		&soc_private->camnoc_axi_min_ib_bw);
+	if (rc == -EOVERFLOW) {
+		/* Older DTs publish this as a 32-bit cell; retry as u32. */
+		soc_private->camnoc_axi_min_ib_bw = 0;
+		rc = of_property_read_u32(of_node,
+			"camnoc-axi-min-ib-bw",
+			(u32 *)&soc_private->camnoc_axi_min_ib_bw);
+	}
+
+	if (rc) {
+		/* Property absent: fall back to the default minimum BW. */
+		CAM_DBG(CAM_CPAS,
+			"failed to read camnoc-axi-min-ib-bw rc:%d", rc);
+		soc_private->camnoc_axi_min_ib_bw =
+			CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+	}
+
+	CAM_DBG(CAM_CPAS, "camnoc-axi-min-ib-bw = %llu",
+		soc_private->camnoc_axi_min_ib_bw);
+
+	soc_private->client_id_based = of_property_read_bool(of_node,
+		"client-id-based");
+
+	count = of_property_count_strings(of_node, "client-names");
+	if (count <= 0) {
+		CAM_ERR(CAM_CPAS, "no client-names found");
+		count = 0;
+		return -EINVAL;
+	}
+	soc_private->num_clients = count;
+	CAM_DBG(CAM_CPAS,
+		"arch-compat=%s, client_id_based = %d, num_clients=%d",
+		soc_private->arch_compat, soc_private->client_id_based,
+		soc_private->num_clients);
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-names", i, &soc_private->client_name[i]);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
+			return -ENODEV;
+		}
+		CAM_DBG(CAM_CPAS, "Client[%d] : %s", i,
+			soc_private->client_name[i]);
+	}
+
+	/* Every client must declare a matching AXI port name. */
+	count = of_property_count_strings(of_node, "client-axi-port-names");
+	if ((count <= 0) || (count != soc_private->num_clients)) {
+		CAM_ERR(CAM_CPAS, "incorrect client-axi-port-names info %d %d",
+			count, soc_private->num_clients);
+		count = 0;
+		return -EINVAL;
+	}
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-axi-port-names", i,
+			&soc_private->client_axi_port_name[i]);
+		if (rc) {
+			/* Fixed copy-paste: report the property actually read */
+			CAM_ERR(CAM_CPAS,
+				"no client-axi-port-name at cnt=%d", i);
+			return -ENODEV;
+		}
+		CAM_DBG(CAM_CPAS, "Client AXI Port[%d] : %s", i,
+			soc_private->client_axi_port_name[i]);
+	}
+
+	soc_private->axi_camnoc_based = of_property_read_bool(of_node,
+		"client-bus-camnoc-based");
+
+	soc_private->control_camnoc_axi_clk = of_property_read_bool(of_node,
+		"control-camnoc-axi-clk");
+
+	if (soc_private->control_camnoc_axi_clk == true) {
+		rc = of_property_read_u32(of_node, "camnoc-bus-width",
+			&soc_private->camnoc_bus_width);
+		if (rc || (soc_private->camnoc_bus_width == 0)) {
+			CAM_ERR(CAM_CPAS, "Bus width not found rc=%d, %d",
+				rc, soc_private->camnoc_bus_width);
+			/*
+			 * Fixed: a present-but-zero bus width previously
+			 * returned rc == 0 (success) to the caller.
+			 */
+			return rc ? rc : -EINVAL;
+		}
+
+		rc = of_property_read_u32(of_node,
+			"camnoc-axi-clk-bw-margin-perc",
+			&soc_private->camnoc_axi_clk_bw_margin);
+
+		if (rc) {
+			/* this is not fatal, overwrite rc */
+			rc = 0;
+			soc_private->camnoc_axi_clk_bw_margin = 0;
+		}
+	}
+
+	CAM_DBG(CAM_CPAS,
+		"control_camnoc_axi_clk=%d, width=%d, margin=%d",
+		soc_private->control_camnoc_axi_clk,
+		soc_private->camnoc_bus_width,
+		soc_private->camnoc_axi_clk_bw_margin);
+
+	/* Optional table mapping vdd corners to AHB vote levels. */
+	count = of_property_count_u32_elems(of_node, "vdd-corners");
+	if ((count > 0) && (count <= CAM_REGULATOR_LEVEL_MAX) &&
+		(of_property_count_strings(of_node, "vdd-corner-ahb-mapping") ==
+		count)) {
+		const char *ahb_string;
+
+		for (i = 0; i < count; i++) {
+			rc = of_property_read_u32_index(of_node, "vdd-corners",
+				i, &soc_private->vdd_ahb[i].vdd_corner);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"vdd-corners failed at index=%d", i);
+				return -ENODEV;
+			}
+
+			rc = of_property_read_string_index(of_node,
+				"vdd-corner-ahb-mapping", i, &ahb_string);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"no ahb-mapping at index=%d", i);
+				return -ENODEV;
+			}
+
+			rc = cam_soc_util_get_level_from_string(ahb_string,
+				&soc_private->vdd_ahb[i].ahb_level);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"invalid ahb-string at index=%d", i);
+				return -EINVAL;
+			}
+
+			CAM_DBG(CAM_CPAS,
+				"Vdd-AHB mapping [%d] : [%d] [%s] [%d]", i,
+				soc_private->vdd_ahb[i].vdd_corner,
+				ahb_string, soc_private->vdd_ahb[i].ahb_level);
+		}
+
+		soc_private->num_vdd_ahb_mapping = count;
+	}
+
+	return 0;
+}
+
+/**
+ * cam_cpas_soc_init_resources() - Acquire CPAS platform resources.
+ * @soc_info: soc info to populate from DT and platform resources
+ * @irq_handler: IRQ handler to attach (required if an irq_line exists)
+ * @irq_data: opaque cookie passed back to @irq_handler
+ *
+ * Reads generic DT properties, requests platform resources, allocates
+ * the CPAS private soc struct and parses the CPAS-specific DT info.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *irq_data)
+{
+	int rc = 0;
+	struct cam_cpas_private_soc *soc_private;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in get_dt_properties, rc=%d", rc);
+		return rc;
+	}
+
+	if (soc_info->irq_line && !irq_handler) {
+		CAM_ERR(CAM_CPAS, "Invalid IRQ handler");
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		irq_data);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in request_platform_resource, rc=%d",
+			rc);
+		return rc;
+	}
+
+	soc_info->soc_private = kzalloc(sizeof(struct cam_cpas_private_soc),
+		GFP_KERNEL);
+	if (!soc_info->soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+
+	rc = cam_cpas_get_custom_dt_info(soc_info->pdev, soc_info->soc_private);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in get_custom_info, rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	soc_private = soc_info->soc_private;
+	soc_private->soc_id = cam_soc_util_get_soc_id();
+	soc_private->hw_rev = cam_soc_util_get_hw_revision_node(soc_info);
+	CAM_DBG(CAM_CPAS, "soc id %d hw_rev %d",
+		soc_private->soc_id, soc_private->hw_rev);
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+	/* Fixed: clear the pointer so callers never see a freed buffer. */
+	soc_info->soc_private = NULL;
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+	return rc;
+}
+
+/*
+ * cam_cpas_soc_deinit_resources() - Release platform resources and free
+ * the CPAS private soc data allocated by cam_cpas_soc_init_resources().
+ * Returns the release status (errors are logged but not fatal here).
+ */
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "release platform failed, rc=%d", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
+
+/*
+ * cam_cpas_soc_enable_resources() - Enable regulators, clocks and IRQ
+ * for CPAS, voting clocks at @default_level.
+ */
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level default_level)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		default_level, true);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "enable platform resource failed, rc=%d", rc);
+
+	return rc;
+}
+
+/*
+ * cam_cpas_soc_disable_resources() - Disable CPAS platform resources.
+ * @disable_clocks and @disable_irq select which resources are turned off
+ * (regulators are always handled by the util layer call below).
+ */
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info,
+		disable_clocks, disable_irq);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "disable platform failed, rc=%d", rc);
+
+	return rc;
+}
+
+/* Disable only the CPAS IRQ line, leaving clocks/regulators untouched. */
+int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_irq_disable(soc_info);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "disable irq failed, rc=%d", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_soc.h
new file mode 100644
index 000000000000..717e358a6144
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cam_cpas_soc.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_SOC_H_
+#define _CAM_CPAS_SOC_H_
+
+#include "cam_soc_util.h"
+#include "cam_cpas_hw.h"
+
+/* Max entries in the vdd-corner to AHB level mapping table */
+#define CAM_REGULATOR_LEVEL_MAX 16
+
+/**
+ * struct cam_cpas_vdd_ahb_mapping : Voltage to ahb level mapping
+ *
+ * @vdd_corner : Voltage corner value
+ * @ahb_level : AHB vote level corresponds to this vdd_corner
+ *
+ */
+struct cam_cpas_vdd_ahb_mapping {
+	unsigned int vdd_corner;
+	enum cam_vote_level ahb_level;
+};
+
+/**
+ * struct cam_cpas_private_soc : CPAS private DT info
+ *
+ * @arch_compat: ARCH compatible string
+ * @client_id_based: Whether clients are id based
+ * @num_clients: Number of clients supported
+ * @client_name: Client names
+ * @axi_camnoc_based: Whether AXi access is camnoc based
+ * @client_axi_port_name: AXI Port name for each client
+ * @axi_port_list_node : Node representing AXI Ports list
+ * @num_vdd_ahb_mapping : Number of vdd to ahb level mapping supported
+ * @vdd_ahb : AHB level mapping info for the supported vdd levels
+ * @soc_id : SOC id
+ * @hw_rev : Camera hw revision
+ * @control_camnoc_axi_clk : Whether CPAS driver need to set camnoc axi clk freq
+ * @camnoc_bus_width : CAMNOC Bus width
+ * @camnoc_axi_clk_bw_margin : BW Margin in percentage to add while calculating
+ *      camnoc axi clock
+ * @camnoc_axi_min_ib_bw: Min camnoc BW which varies based on target
+ *
+ */
+struct cam_cpas_private_soc {
+	const char *arch_compat;
+	bool client_id_based;
+	uint32_t num_clients;
+	const char *client_name[CAM_CPAS_MAX_CLIENTS];
+	bool axi_camnoc_based;
+	const char *client_axi_port_name[CAM_CPAS_MAX_CLIENTS];
+	struct device_node *axi_port_list_node;
+	uint32_t num_vdd_ahb_mapping;
+	struct cam_cpas_vdd_ahb_mapping vdd_ahb[CAM_REGULATOR_LEVEL_MAX];
+	uint32_t soc_id;
+	uint32_t hw_rev;
+	bool control_camnoc_axi_clk;
+	uint32_t camnoc_bus_width;
+	uint32_t camnoc_axi_clk_bw_margin;
+	uint64_t camnoc_axi_min_ib_bw;
+};
+
+/* Resource acquire/release and enable/disable entry points; see the
+ * corresponding definitions in cam_cpas_soc.c for semantics.
+ */
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t vfe_irq_handler, void *irq_data);
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level default_level);
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq);
+int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info);
+#endif /* _CAM_CPAS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/camss_top/Makefile b/drivers/media/platform/msm/camera_v3/cam_cpas/camss_top/Makefile
new file mode 100644
index 000000000000..a522c5d4fde1
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/camss_top/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_camsstop_hw.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera_v3/cam_cpas/camss_top/cam_camsstop_hw.c
new file mode 100644
index 000000000000..7da3c39699fc
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -0,0 +1,89 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+/**
+ * cam_camsstop_get_hw_info() - Read camss hw version into @hw_caps.
+ * @cpas_hw: CPAS hw info with the mapped CAMSS register base
+ * @hw_caps: caps struct to fill with family and version fields
+ *
+ * Reads the version register at offset 0x0 of the CAMSS base and
+ * decodes major/minor/incr fields.
+ *
+ * Return: 0 on success, -EINVAL if the CAMSS regbase is not mapped.
+ */
+int cam_camsstop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CAMSS];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CAMERA_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->camera_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->camera_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	/* Fixed: log under the CPAS tag, not CAM_FD (copy-paste error). */
+	CAM_DBG(CAM_CPAS, "Family %d, version %d.%d.%d",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr);
+
+	return 0;
+}
+
+/*
+ * cam_camsstop_setup_regbase_indices() - Map the named "cam_camss" memory
+ * block to the CAM_CPAS_REG_CAMSS slot in @regbase_index.
+ *
+ * Returns -EINVAL if the table sizes are inconsistent or the required
+ * "cam_camss" block is missing from the DT reg names.
+ */
+int cam_camsstop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+			soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camss", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMSS] = index;
+	} else {
+		CAM_ERR(CAM_CPAS, "regbase not found for CAM_CPAS_REG_CAMSS");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_camsstop_get_internal_ops() - Populate the CPAS internal ops for
+ * camss-top based targets.  Ops the camss-top hw does not support
+ * (irq handling, power hooks, versioned init) are left NULL.
+ */
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		CAM_ERR(CAM_CPAS, "invalid NULL param");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_camsstop_get_hw_info;
+	internal_ops->init_hw_version = NULL;
+	internal_ops->handle_irq = NULL;
+	internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices;
+	internal_ops->power_on = NULL;
+	internal_ops->power_off = NULL;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/Makefile b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/Makefile
new file mode 100644
index 000000000000..cec4f1f9f8dd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpastop_hw.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cam_cpastop_hw.c
new file mode 100644
index 000000000000..8f89c4b7b5fd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -0,0 +1,672 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpastop_hw.h"
+#include "cam_io_util.h"
+#include "cam_cpas_soc.h"
+#include "cpastop100.h"
+#include "cpastop_v170_110.h"
+#include "cpastop_v175_100.h"
+#include "cpastop_v175_101.h"
+
+/* Target-specific CAMNOC description, selected during hw version init. */
+struct cam_camnoc_info *camnoc_info;
+
+#define CAMNOC_SLAVE_MAX_ERR_CODE 7
+/*
+ * Human-readable strings for CAMNOC slave error codes; index equals the
+ * hw err_code, with index CAMNOC_SLAVE_MAX_ERR_CODE as the catch-all.
+ * NOTE(review): "salve" is a typo for "slave"; kept as-is because the
+ * identifier is referenced elsewhere in this file.
+ */
+static const char * const camnoc_salve_err_code[] = {
+	"Target Error",              /* err code 0 */
+	"Address decode error",      /* err code 1 */
+	"Unsupported request",       /* err code 2 */
+	"Disconnected target",       /* err code 3 */
+	"Security violation",        /* err code 4 */
+	"Hidden security violation", /* err code 5 */
+	"Timeout Error",             /* err code 6 */
+	"Unknown Error",             /* unknown err code */
+};
+
+/**
+ * cam_cpastop_get_hw_info() - Read cpas-top hw version info into @hw_caps.
+ * @cpas_hw: CPAS hw info with the mapped CPASTOP register base
+ * @hw_caps: caps struct to fill
+ *
+ * Reads camera version (offset 0x0), cpas version (offset 0x4) and
+ * capability bits (offset 0x8) from the CPASTOP base, then derives the
+ * titan soc hw_version enum from the decoded version pair.
+ *
+ * Return: 0 on success, -EINVAL if the CPASTOP regbase is not mapped.
+ */
+static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CPAS_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
+	hw_caps->camera_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
+	hw_caps->camera_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4);
+	hw_caps->cpas_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->cpas_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->cpas_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x8);
+	hw_caps->camera_capability = reg_value;
+
+	/* Fixed: log under the CPAS tag, not CAM_FD (copy-paste error). */
+	CAM_DBG(CAM_CPAS,
+		"Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	/* Map (camera_version, cpas_version) to a known titan hw revision. */
+	soc_info->hw_version = CAM_CPAS_TITAN_NONE;
+
+	if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 7) &&
+		(hw_caps->camera_version.incr == 0)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_170_V100;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 1) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_170_V110;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 2) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_170_V120;
+	} else if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 7) &&
+		(hw_caps->camera_version.incr == 5)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_175_V100;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 1))
+			soc_info->hw_version = CAM_CPAS_TITAN_175_V101;
+	}
+
+	CAM_DBG(CAM_CPAS, "CPAS HW VERSION %x", soc_info->hw_version);
+
+	return 0;
+}
+
+/**
+ * cam_cpastop_setup_regbase_indices() - Resolve named memory blocks into
+ * regbase_index slots for cpas-top targets.
+ *
+ * "cam_cpas_top" and "cam_camnoc" are mandatory; "core_top_csr_tcsr" is
+ * optional (only needed by the TCSR errata workaround) so its absence is
+ * logged at debug level and does not fail the call.
+ *
+ * Return: 0 on success, -EINVAL on inconsistent tables or a missing
+ * mandatory block.
+ */
+static int cam_cpastop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+			soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_cpas_top", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CPASTOP] = index;
+	} else {
+		CAM_ERR(CAM_CPAS, "regbase not found for CPASTOP, rc=%d, %d %d",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camnoc", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMNOC] = index;
+	} else {
+		CAM_ERR(CAM_CPAS, "regbase not found for CAMNOC, rc=%d, %d %d",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "core_top_csr_tcsr", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CSR_TCSR] = index;
+	} else {
+		/* Fixed copy-paste: this lookup is for TCSR, not CAMNOC. */
+		CAM_DBG(CAM_CPAS, "regbase not found for TCSR, rc=%d, %d %d",
+			rc, index, num_reg_map);
+	}
+
+	return 0;
+}
+
+/*
+ * cam_cpastop_handle_errlogger() - Dump the CAMNOC error-logger registers
+ * on a SLAVE_ERROR irq.
+ *
+ * Snapshots mainctrl, errvld and all four errlog low/high pairs into
+ * @slave_err, then rate-limit-logs the decoded fields.  The err_code from
+ * errlog0_low indexes camnoc_salve_err_code[], clamped to the catch-all
+ * "Unknown Error" entry.
+ *
+ * Return: 0 on success, -EINVAL if no err_logger layout is configured
+ * for this target.
+ */
+static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info,
+	struct cam_camnoc_irq_slave_err_data *slave_err)
+{
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int err_code_index = 0;
+
+	if (!camnoc_info->err_logger) {
+		CAM_ERR_RATE_LIMIT(CAM_CPAS, "Invalid err logger info");
+		return -EINVAL;
+	}
+
+	/* Snapshot all error-logger registers before decoding/logging. */
+	slave_err->mainctrl.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->mainctrl);
+
+	slave_err->errvld.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errvld);
+
+	slave_err->errlog0_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog0_low);
+
+	slave_err->errlog0_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog0_high);
+
+	slave_err->errlog1_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog1_low);
+
+	slave_err->errlog1_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog1_high);
+
+	slave_err->errlog2_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog2_low);
+
+	slave_err->errlog2_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog2_high);
+
+	slave_err->errlog3_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog3_low);
+
+	slave_err->errlog3_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog3_high);
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"Possible memory configuration issue, fault at SMMU raised as CAMNOC SLAVE_IRQ");
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"mainctrl[0x%x 0x%x] errvld[0x%x 0x%x] stall_en=%d, fault_en=%d, err_vld=%d",
+		camnoc_info->err_logger->mainctrl,
+		slave_err->mainctrl.value,
+		camnoc_info->err_logger->errvld,
+		slave_err->errvld.value,
+		slave_err->mainctrl.stall_en,
+		slave_err->mainctrl.fault_en,
+		slave_err->errvld.err_vld);
+
+	/* Clamp unknown hw error codes to the catch-all string. */
+	err_code_index = slave_err->errlog0_low.err_code;
+	if (err_code_index > CAMNOC_SLAVE_MAX_ERR_CODE)
+		err_code_index = CAMNOC_SLAVE_MAX_ERR_CODE;
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"errlog0 low[0x%x 0x%x] high[0x%x 0x%x] loginfo_vld=%d, word_error=%d, non_secure=%d, device=%d, opc=%d, err_code=%d(%s) sizef=%d, addr_space=%d, len1=%d",
+		camnoc_info->err_logger->errlog0_low,
+		slave_err->errlog0_low.value,
+		camnoc_info->err_logger->errlog0_high,
+		slave_err->errlog0_high.value,
+		slave_err->errlog0_low.loginfo_vld,
+		slave_err->errlog0_low.word_error,
+		slave_err->errlog0_low.non_secure,
+		slave_err->errlog0_low.device,
+		slave_err->errlog0_low.opc,
+		slave_err->errlog0_low.err_code,
+		camnoc_salve_err_code[err_code_index],
+		slave_err->errlog0_low.sizef,
+		slave_err->errlog0_low.addr_space,
+		slave_err->errlog0_high.len1);
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"errlog1_low[0x%x 0x%x] errlog1_high[0x%x 0x%x] errlog2_low[0x%x 0x%x] errlog2_high[0x%x 0x%x] errlog3_low[0x%x 0x%x] errlog3_high[0x%x 0x%x]",
+		camnoc_info->err_logger->errlog1_low,
+		slave_err->errlog1_low.value,
+		camnoc_info->err_logger->errlog1_high,
+		slave_err->errlog1_high.value,
+		camnoc_info->err_logger->errlog2_low,
+		slave_err->errlog2_low.value,
+		camnoc_info->err_logger->errlog2_high,
+		slave_err->errlog2_high.value,
+		camnoc_info->err_logger->errlog3_low,
+		slave_err->errlog3_low.value,
+		camnoc_info->err_logger->errlog3_high,
+		slave_err->errlog3_high.value);
+
+	return 0;
+}
+
+/*
+ * cam_cpastop_handle_ubwc_enc_err() - Capture the UBWC encoder error
+ * status for irq_err entry @i into @enc_err.  Only logs at debug level;
+ * actual handling is left to the notified clients.
+ */
+static int cam_cpastop_handle_ubwc_enc_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i,
+	struct cam_camnoc_irq_ubwc_enc_data *enc_err)
+{
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	enc_err->encerr_status.value =
+		cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_err[i].err_status.offset);
+
+	/* Let clients handle the UBWC errors */
+	CAM_DBG(CAM_CPAS,
+		"ubwc enc err [%d]: offset[0x%x] value[0x%x]",
+		i, camnoc_info->irq_err[i].err_status.offset,
+		enc_err->encerr_status.value);
+
+	return 0;
+}
+
+/*
+ * cam_cpastop_handle_ubwc_dec_err() - Capture the UBWC decoder error
+ * status for irq_err entry @i into @dec_err.  Only logs at debug level;
+ * actual handling is left to the notified clients.
+ */
+static int cam_cpastop_handle_ubwc_dec_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i,
+	struct cam_camnoc_irq_ubwc_dec_data *dec_err)
+{
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	dec_err->decerr_status.value =
+		cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_err[i].err_status.offset);
+
+	/* Let clients handle the UBWC errors */
+	CAM_DBG(CAM_CPAS,
+		"ubwc dec err status [%d]: offset[0x%x] value[0x%x] thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+		i, camnoc_info->irq_err[i].err_status.offset,
+		dec_err->decerr_status.value,
+		dec_err->decerr_status.thr_err,
+		dec_err->decerr_status.fcl_err,
+		dec_err->decerr_status.len_md_err,
+		dec_err->decerr_status.format_err);
+
+	return 0;
+}
+
+/* AHB timeout irq: nothing to decode, just rate-limit-log the event. */
+static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw,
+	struct cam_camnoc_irq_ahb_timeout_data *ahb_err)
+{
+	CAM_ERR_RATE_LIMIT(CAM_CPAS, "ahb timeout error");
+
+	return 0;
+}
+
+/*
+ * Strip the test-irq bits from the SBM clear/enable masks and disable
+ * the CAMNOC_TEST irq entry so a one-shot test irq is not re-armed.
+ */
+static int cam_cpastop_disable_test_irq(struct cam_hw_info *cpas_hw)
+{
+	camnoc_info->irq_sbm->sbm_clear.value &= ~0x4;
+	camnoc_info->irq_sbm->sbm_enable.value &= ~0x100;
+	camnoc_info->irq_err[CAM_CAMNOC_HW_IRQ_CAMNOC_TEST].enable = false;
+
+	return 0;
+}
+
+/*
+ * cam_cpastop_reset_irq() - Clear then re-enable all configured CAMNOC
+ * irqs: SBM clear + per-error clears first, then SBM enable + per-error
+ * enables, writing only entries flagged as enabled.
+ */
+static int cam_cpastop_reset_irq(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_clear);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_clear);
+	}
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_enable);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_enable);
+	}
+
+	return 0;
+}
+
+/*
+ * cam_cpastop_notify_clients() - Fan an irq event out to all started
+ * clients that registered a callback.  Iteration stops at the first
+ * client whose callback reports the error as handled.
+ */
+static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
+	struct cam_cpas_irq_data *irq_data)
+{
+	int i;
+	struct cam_cpas_client *cpas_client;
+	bool error_handled = false;
+
+	CAM_DBG(CAM_CPAS,
+		"Notify CB : num_clients=%d, registered=%d, started=%d",
+		cpas_core->num_clients, cpas_core->registered_clients,
+		cpas_core->streamon_clients);
+
+	for (i = 0; i < cpas_core->num_clients; i++) {
+		if (CAM_CPAS_CLIENT_STARTED(cpas_core, i)) {
+			cpas_client = cpas_core->cpas_client[i];
+			if (cpas_client->data.cam_cpas_client_cb) {
+				CAM_DBG(CAM_CPAS,
+					"Calling client CB %d : %d",
+					i, irq_data->irq_type);
+				error_handled =
+					cpas_client->data.cam_cpas_client_cb(
+					cpas_client->data.client_handle,
+					cpas_client->data.userdata,
+					irq_data);
+				if (error_handled)
+					break;
+			}
+		}
+	}
+}
+
+/*
+ * cam_cpastop_work() - Workqueue bottom-half for CAMNOC irqs.
+ *
+ * Decodes each pending sbm_port bit captured by the top-half, collects
+ * the matching error data, notifies started clients, and clears the
+ * handled bit from the saved status.  irq_count gates against racing
+ * with power off.  The payload was allocated in the irq handler and is
+ * always freed here.
+ */
+static void cam_cpastop_work(struct work_struct *work)
+{
+	struct cam_cpas_work_payload *payload;
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	struct cam_hw_soc_info *soc_info;
+	int i;
+	enum cam_camnoc_hw_irq_type irq_type;
+	struct cam_cpas_irq_data irq_data;
+
+	payload = container_of(work, struct cam_cpas_work_payload, work);
+	if (!payload) {
+		CAM_ERR(CAM_CPAS, "NULL payload");
+		return;
+	}
+
+	cpas_hw = payload->hw;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	soc_info = &cpas_hw->soc_info;
+
+	if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
+		CAM_ERR(CAM_CPAS, "CPAS off");
+		/* Fixed: payload was leaked on this early-return path. */
+		kfree(payload);
+		return;
+	}
+
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
+			(camnoc_info->irq_err[i].enable)) {
+			irq_type = camnoc_info->irq_err[i].irq_type;
+			CAM_ERR_RATE_LIMIT(CAM_CPAS,
+				"Error occurred, type=%d", irq_type);
+			memset(&irq_data, 0x0, sizeof(irq_data));
+			irq_data.irq_type = (enum cam_camnoc_irq_type)irq_type;
+
+			switch (irq_type) {
+			case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
+				cam_cpastop_handle_errlogger(
+					cpas_core, soc_info,
+					&irq_data.u.slave_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+				cam_cpastop_handle_ubwc_enc_err(
+					cpas_core, soc_info, i,
+					&irq_data.u.enc_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+				cam_cpastop_handle_ubwc_dec_err(
+					cpas_core, soc_info, i,
+					&irq_data.u.dec_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
+				cam_cpastop_handle_ahb_timeout_err(
+					cpas_hw, &irq_data.u.ahb_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
+				CAM_DBG(CAM_CPAS, "TEST IRQ");
+				break;
+			default:
+				CAM_ERR(CAM_CPAS, "Invalid IRQ type");
+				break;
+			}
+
+			cam_cpastop_notify_clients(cpas_core, &irq_data);
+
+			payload->irq_status &=
+				~camnoc_info->irq_err[i].sbm_port;
+		}
+	}
+	atomic_dec(&cpas_core->irq_count);
+	wake_up(&cpas_core->irq_count_wq);
+	CAM_DBG(CAM_CPAS, "irq_count=%d\n", atomic_read(&cpas_core->irq_count));
+
+	if (payload->irq_status)
+		CAM_ERR(CAM_CPAS, "IRQ not handled irq_status=0x%x",
+			payload->irq_status);
+
+	kfree(payload);
+}
+
+/*
+ * cam_cpastop_handle_irq() - CAMNOC irq top-half.
+ *
+ * Snapshots the SBM status register into a GFP_ATOMIC work payload,
+ * clears/re-arms the irqs, then defers decoding to cam_cpastop_work()
+ * on the cpas workqueue.  irq_count gates against racing with power
+ * off; the payload is freed by the work function.
+ */
+static irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	struct cam_cpas_work_payload *payload;
+
+	if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
+		CAM_ERR(CAM_CPAS, "CPAS off");
+		return IRQ_HANDLED;
+	}
+
+	payload = kzalloc(sizeof(struct cam_cpas_work_payload), GFP_ATOMIC);
+	if (!payload)
+		goto done;
+
+	payload->irq_status = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_sbm->sbm_status.offset);
+
+	CAM_DBG(CAM_CPAS, "IRQ callback, irq_status=0x%x", payload->irq_status);
+
+	payload->hw = cpas_hw;
+	INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work);
+
+	if (TEST_IRQ_ENABLE)
+		cam_cpastop_disable_test_irq(cpas_hw);
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	queue_work(cpas_core->work_queue, &payload->work);
+done:
+	atomic_dec(&cpas_core->irq_count);
+	wake_up(&cpas_core->irq_count_wq);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * cam_cpastop_poweron() - Power-on hook for cpas-top targets.
+ *
+ * Re-arms the CAMNOC irqs, programs all enabled target-specific CAMNOC
+ * settings (priority LUTs, urgency, danger/safe LUTs, UBWC control,
+ * flag-out), then applies the TCSR conn-box-spare errata write on the
+ * affected SOCs (SDM710/SDM712, and SDM670 v1.1).
+ */
+static int cam_cpastop_poweron(struct cam_hw_info *cpas_hw)
+{
+	int i;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_private_soc *soc_private = soc_info->soc_private;
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	for (i = 0; i < camnoc_info->specific_size; i++) {
+		if (camnoc_info->specific[i].enable) {
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_low);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_high);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].urgency);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].danger_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].safe_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].ubwc_ctl);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].flag_out_set0_low);
+		}
+	}
+
+	/* TCSR errata workaround: only on the listed SOC/revision combos. */
+	if (soc_private && ((soc_private->soc_id == SDM710_SOC_ID) ||
+		(soc_private->soc_id == SDM712_SOC_ID) ||
+		((soc_private->soc_id == SDM670_SOC_ID) &&
+		(soc_private->hw_rev == SDM670_V1_1)))) {
+
+		struct cam_cpas_reg *reg_info;
+		int tcsr_index;
+		void __iomem *mem_base;
+
+		reg_info = &camnoc_info->errata_wa_list->tcsr_reg.
+			tcsr_conn_box_spare_0;
+		tcsr_index = cpas_core->regbase_index[CAM_CPAS_REG_CSR_TCSR];
+		if (tcsr_index == -1) {
+			/* TCSR regbase is optional; skip the workaround. */
+			CAM_DBG(CAM_CPAS, "index in not initialized");
+			return 0;
+		}
+		mem_base = soc_info->reg_map[tcsr_index].mem_base;
+
+		reg_info->value = TCSR_CONN_SET;
+		cam_io_w_mb(reg_info->value, mem_base + reg_info->offset);
+		CAM_DBG(CAM_CPAS, "tcsr(0x%lx) value %d",
+			(unsigned long int)mem_base + reg_info->offset,
+			cam_io_r_mb(mem_base + reg_info->offset));
+	}
+
+	return 0;
+}
+
+/*
+ * cam_cpastop_poweroff() - errata handling before CPAS power down.
+ *
+ * If enabled, polls the SidebandManager sense register until pending
+ * camnoc slave transactions are flushed (failure is logged but not
+ * propagated), then resets the TCSR conn_box_spare_0 workaround on
+ * SDM710, SDM712 and SDM670 v1.1.
+ *
+ * Returns 0 on success or if no errata list is configured.
+ */
+static int cam_cpastop_poweroff(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int rc = 0;
+	struct cam_cpas_hw_errata_wa_list *errata_wa_list =
+		camnoc_info->errata_wa_list;
+	struct cam_cpas_private_soc *soc_private =
+		cpas_hw->soc_info.soc_private;
+
+	if (!errata_wa_list)
+		return 0;
+
+	if (errata_wa_list->camnoc_flush_slave_pending_trans.enable) {
+		struct cam_cpas_hw_errata_wa *errata_wa =
+			&errata_wa_list->camnoc_flush_slave_pending_trans;
+
+		rc = cam_io_poll_value_wmask(
+			soc_info->reg_map[camnoc_index].mem_base +
+			errata_wa->data.reg_info.offset,
+			errata_wa->data.reg_info.value,
+			errata_wa->data.reg_info.mask,
+			CAM_CPAS_POLL_RETRY_CNT,
+			CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
+		if (rc) {
+			CAM_DBG(CAM_CPAS,
+				"camnoc flush slave pending trans failed");
+			/* Do not return error, passthrough */
+			rc = 0;
+		}
+	}
+
+	/* Undo the TCSR conn_box_spare_0 workaround applied at poweron */
+	if (soc_private && ((soc_private->soc_id == SDM710_SOC_ID)
+		|| (soc_private->soc_id == SDM712_SOC_ID)
+		|| ((soc_private->soc_id == SDM670_SOC_ID) &&
+		(soc_private->hw_rev == SDM670_V1_1)))) {
+
+		struct cam_cpas_reg *reg_info;
+		int tcsr_index;
+		void __iomem *mem_base;
+
+		reg_info = &camnoc_info->errata_wa_list->tcsr_reg.
+			tcsr_conn_box_spare_0;
+		reg_info->value = TCSR_CONN_RESET;
+		tcsr_index = cpas_core->regbase_index[CAM_CPAS_REG_CSR_TCSR];
+		if (tcsr_index == -1) {
+			/* Fixed log typo: "in not" -> "is not" */
+			CAM_DBG(CAM_CPAS, "index is not initialized");
+			return 0;
+		}
+		mem_base = soc_info->reg_map[tcsr_index].mem_base;
+		cam_io_w_mb(reg_info->value, mem_base + reg_info->offset);
+		CAM_DBG(CAM_CPAS, "tcsr(0x%lx) value %d",
+			(unsigned long int)mem_base + reg_info->offset,
+			cam_io_r_mb(mem_base + reg_info->offset));
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cpastop_init_hw_version() - select the camnoc settings table for
+ * the detected CPAS hardware version.
+ *
+ * Returns 0 on success, -EINVAL when soc_info->hw_version is not one of
+ * the supported Titan 170/175 versions.
+ */
+static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+
+	CAM_DBG(CAM_CPAS,
+		"hw_version=0x%x Camera Version %d.%d.%d, cpas version %d.%d.%d",
+		soc_info->hw_version,
+		hw_caps->camera_version.major,
+		hw_caps->camera_version.minor,
+		hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major,
+		hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr);
+
+	switch (soc_info->hw_version) {
+	case CAM_CPAS_TITAN_170_V100:
+		camnoc_info = &cam170_cpas100_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_170_V110:
+		camnoc_info = &cam170_cpas110_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_175_V100:
+		camnoc_info = &cam175_cpas100_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_175_V101:
+		camnoc_info = &cam175_cpas101_camnoc_info;
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d",
+			hw_caps->camera_version.major,
+			hw_caps->camera_version.minor,
+			hw_caps->camera_version.incr);
+		rc = -EINVAL;
+		break;
+	}
+
+	/*
+	 * Bug fix: previously returned a hard-coded 0, discarding the
+	 * -EINVAL set in the default case and leaving camnoc_info unset
+	 * for unsupported versions.
+	 */
+	return rc;
+}
+
+/*
+ * cam_cpastop_get_internal_ops() - populate the CPAS-top internal
+ * callback table with this file's implementations.
+ *
+ * Returns 0 on success, -EINVAL if @internal_ops is NULL.
+ */
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		CAM_ERR(CAM_CPAS, "invalid NULL param");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_cpastop_get_hw_info;
+	internal_ops->init_hw_version = cam_cpastop_init_hw_version;
+	internal_ops->handle_irq = cam_cpastop_handle_irq;
+	internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices;
+	internal_ops->power_on = cam_cpastop_poweron;
+	internal_ops->power_off = cam_cpastop_poweroff;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cam_cpastop_hw.h
new file mode 100644
index 000000000000..31e39e3630ec
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -0,0 +1,273 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPASTOP_HW_H_
+#define _CAM_CPASTOP_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw.h"
+
+/**
+ * enum cam_camnoc_hw_irq_type - Enum for camnoc error types
+ *
+ * @CAM_CAMNOC_HW_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ * 1 QHB port) has an error logger. The error
+ * observed at any slave port is logged into
+ * the error logger register and an IRQ is
+ * triggered
+ * @CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR : Triggered if any error
+ * detected in the IFE0 UBWC
+ * encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR : Triggered if any error
+ * detected in the IFE1 or IFE3
+ * UBWC encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error
+ * detected in the IPE/BPS
+ * UBWC decoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error
+ * detected in the IPE/BPS UBWC
+ * encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR: Triggered if any UBWC error
+ * is detected in IFE0 write path
+ * @CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR: Triggered if any UBWC error
+ * is detected in IFE1 write path
+ * @CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT : Triggered when the QHS_ICP
+ * slave times out after 4000
+ * AHB cycles
+ * @CAM_CAMNOC_HW_IRQ_RESERVED1 : Reserved
+ * @CAM_CAMNOC_HW_IRQ_RESERVED2 : Reserved
+ * @CAM_CAMNOC_HW_IRQ_CAMNOC_TEST : To test the IRQ logic
+ */
+enum cam_camnoc_hw_irq_type {
+ CAM_CAMNOC_HW_IRQ_SLAVE_ERROR =
+ CAM_CAMNOC_IRQ_SLAVE_ERROR,
+ CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR =
+ CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+ CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR =
+ CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+ CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR =
+ CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR,
+ CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR =
+ CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR,
+ CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR =
+ CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+ CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR =
+ CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+ CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT =
+ CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+ CAM_CAMNOC_HW_IRQ_RESERVED1,
+ CAM_CAMNOC_HW_IRQ_RESERVED2,
+ CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+};
+
+/**
+ * enum cam_camnoc_port_type - Enum for different camnoc hw ports. All CAMNOC
+ * settings like QoS, LUT mappings need to be configured for
+ * each of these ports.
+ *
+ * @CAM_CAMNOC_CDM: Indicates CDM HW connection to camnoc
+ * @CAM_CAMNOC_IFE02: Indicates IFE0, IFE2 HW connection to camnoc
+ * @CAM_CAMNOC_IFE13: Indicates IFE1, IFE3 HW connection to camnoc
+ * @CAM_CAMNOC_IFE0123_RDI_WRITE: RDI write only for all IFEx
+ * @CAM_CAMNOC_IFE0_NRDI_WRITE: IFE0 non-RDI write
+ * @CAM_CAMNOC_IFE01_RDI_READ: IFE0/1 RDI READ
+ * @CAM_CAMNOC_IFE1_NRDI_WRITE: IFE1 non-RDI write
+ * @CAM_CAMNOC_IPE_BPS_LRME_READ: Indicates IPE, BPS, LRME Read HW
+ * connection to camnoc
+ * @CAM_CAMNOC_IPE_BPS_LRME_WRITE: Indicates IPE, BPS, LRME Write HW
+ * connection to camnoc
+ * @CAM_CAMNOC_IPE_VID_DISP_WRITE: Indicates IPE's VID/DISP Write HW
+ * connection to camnoc
+ * @CAM_CAMNOC_JPEG: Indicates JPEG HW connection to camnoc
+ * @CAM_CAMNOC_FD: Indicates FD HW connection to camnoc
+ * @CAM_CAMNOC_ICP: Indicates ICP HW connection to camnoc
+ */
+enum cam_camnoc_port_type {
+ CAM_CAMNOC_CDM,
+ CAM_CAMNOC_IFE02,
+ CAM_CAMNOC_IFE13,
+ CAM_CAMNOC_IFE0123_RDI_WRITE,
+ CAM_CAMNOC_IFE0_NRDI_WRITE,
+ CAM_CAMNOC_IFE01_RDI_READ,
+ CAM_CAMNOC_IFE1_NRDI_WRITE,
+ CAM_CAMNOC_IPE_BPS_LRME_READ,
+ CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+ CAM_CAMNOC_IPE_VID_DISP_WRITE,
+ CAM_CAMNOC_JPEG,
+ CAM_CAMNOC_FD,
+ CAM_CAMNOC_ICP,
+};
+
+/**
+ * struct cam_camnoc_specific : CPAS camnoc specific settings
+ *
+ * @port_type: Port type
+ * @enable: Whether to enable settings for this connection
+ * @priority_lut_low: Priority Low LUT mapping for this connection
+ * @priority_lut_high: Priority High LUT mapping for this connection
+ * @urgency: Urgency (QoS) settings for this connection
+ * @danger_lut: Danger LUT mapping for this connection
+ * @safe_lut: Safe LUT mapping for this connection
+ * @ubwc_ctl: UBWC control settings for this connection
+ * @flag_out_set0_low: Flag out set0 low settings for this connection
+ *
+ */
+struct cam_camnoc_specific {
+ enum cam_camnoc_port_type port_type;
+ bool enable;
+ struct cam_cpas_reg priority_lut_low;
+ struct cam_cpas_reg priority_lut_high;
+ struct cam_cpas_reg urgency;
+ struct cam_cpas_reg danger_lut;
+ struct cam_cpas_reg safe_lut;
+ struct cam_cpas_reg ubwc_ctl;
+ struct cam_cpas_reg flag_out_set0_low;
+};
+
+/**
+ * struct cam_camnoc_irq_sbm : Sideband manager settings for all CAMNOC IRQs
+ *
+ * @sbm_enable: SBM settings for IRQ enable
+ * @sbm_status: SBM settings for IRQ status
+ * @sbm_clear: SBM settings for IRQ clear
+ *
+ */
+struct cam_camnoc_irq_sbm {
+ struct cam_cpas_reg sbm_enable;
+ struct cam_cpas_reg sbm_status;
+ struct cam_cpas_reg sbm_clear;
+};
+
+/**
+ * struct cam_camnoc_irq_err : Error settings specific to each CAMNOC IRQ
+ *
+ * @irq_type: Type of IRQ
+ * @enable: Whether to enable error settings for this IRQ
+ * @sbm_port: Corresponding SBM port for this IRQ
+ * @err_enable: Error enable settings for this IRQ
+ * @err_status: Error status settings for this IRQ
+ * @err_clear: Error clear settings for this IRQ
+ *
+ */
+struct cam_camnoc_irq_err {
+ enum cam_camnoc_hw_irq_type irq_type;
+ bool enable;
+ uint32_t sbm_port;
+ struct cam_cpas_reg err_enable;
+ struct cam_cpas_reg err_status;
+ struct cam_cpas_reg err_clear;
+};
+
+/**
+ * struct cam_cpas_hw_errata_wa : Struct for HW errata workaround info
+ *
+ * @enable: Whether to enable this errata workaround
+ * @data: HW Errata workaround data
+ *
+ */
+struct cam_cpas_hw_errata_wa {
+ bool enable;
+ union {
+ struct cam_cpas_reg reg_info;
+ } data;
+};
+
+/**
+ * struct cam_camnoc_tcsr_regs : Top control Status register
+ *
+ * @tcsr_conn_box_spare_0: spare register to select PriorityLvl
+ * for IFE0 and IFE1 (HW workaround for SDM670 1.1)
+ *
+ */
+struct cam_camnoc_tcsr_regs {
+ struct cam_cpas_reg tcsr_conn_box_spare_0;
+};
+
+/**
+ * struct cam_cpas_hw_errata_wa_list : List of HW Errata workaround info
+ *
+ * @camnoc_flush_slave_pending_trans: Errata workaround info for flushing
+ * camnoc slave pending transactions before turning off CPAS_TOP gdsc
+ * @tcsr_reg: HW workaround to select PriorityLvl for IFE0 and IFE1 (SDM670 v1.1)
+ *
+ */
+struct cam_cpas_hw_errata_wa_list {
+ struct cam_cpas_hw_errata_wa camnoc_flush_slave_pending_trans;
+ struct cam_camnoc_tcsr_regs tcsr_reg;
+};
+
+/**
+ * struct cam_camnoc_err_logger_info : CAMNOC error logger register offsets
+ *
+ * @mainctrl: Register offset for mainctrl
+ * @errvld: Register offset for errvld
+ * @errlog0_low: Register offset for errlog0_low
+ * @errlog0_high: Register offset for errlog0_high
+ * @errlog1_low: Register offset for errlog1_low
+ * @errlog1_high: Register offset for errlog1_high
+ * @errlog2_low: Register offset for errlog2_low
+ * @errlog2_high: Register offset for errlog2_high
+ * @errlog3_low: Register offset for errlog3_low
+ * @errlog3_high: Register offset for errlog3_high
+ *
+ */
+struct cam_camnoc_err_logger_info {
+ uint32_t mainctrl;
+ uint32_t errvld;
+ uint32_t errlog0_low;
+ uint32_t errlog0_high;
+ uint32_t errlog1_low;
+ uint32_t errlog1_high;
+ uint32_t errlog2_low;
+ uint32_t errlog2_high;
+ uint32_t errlog3_low;
+ uint32_t errlog3_high;
+};
+
+/**
+ * struct cam_camnoc_info : Overall CAMNOC settings info
+ *
+ * @specific: Pointer to the array of per-port CAMNOC specific settings
+ * @specific_size: Array size of the per-port specific settings
+ * @irq_sbm: Pointer to CAMNOC IRQ SBM settings
+ * @irq_err: Pointer to CAMNOC IRQ Error settings
+ * @irq_err_size: Array size of IRQ Error settings
+ * @err_logger: Pointer to CAMNOC IRQ Error logger read registers
+ * @errata_wa_list: HW Errata workaround info
+ *
+ */
+struct cam_camnoc_info {
+ struct cam_camnoc_specific *specific;
+ int specific_size;
+ struct cam_camnoc_irq_sbm *irq_sbm;
+ struct cam_camnoc_irq_err *irq_err;
+ int irq_err_size;
+ struct cam_camnoc_err_logger_info *err_logger;
+ struct cam_cpas_hw_errata_wa_list *errata_wa_list;
+};
+
+/**
+ * struct cam_cpas_work_payload : Struct for cpas work payload data
+ *
+ * @hw: Pointer to HW info
+ * @irq_status: IRQ status value
+ * @irq_data: IRQ data
+ * @work: Work handle
+ *
+ */
+struct cam_cpas_work_payload {
+ struct cam_hw_info *hw;
+ uint32_t irq_status;
+ uint32_t irq_data;
+ struct work_struct work;
+};
+
+#endif /* _CAM_CPASTOP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop100.h
new file mode 100644
index 000000000000..83ba2cf3b206
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop100.h
@@ -0,0 +1,538 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CPASTOP100_H_
+#define _CPASTOP100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas100_irq_sbm = {
+ .sbm_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+ .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+ 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+ 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+ 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+ 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+ 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+ (TEST_IRQ_ENABLE ?
+ 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+ 0x0),
+ },
+ .sbm_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+ },
+ .sbm_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+ .value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+ }
+};
+
+static struct cam_camnoc_irq_err
+ cam_cpas100_irq_err[] = {
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+ .enable = true,
+ .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x1190,
+ /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+ .enable = true,
+ .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+ .value = 0x1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+ },
+ .err_clear = {
+ .enable = false,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+ .enable = false,
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+ .enable = false,
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+ .enable = TEST_IRQ_ENABLE ? true : false,
+ .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+ .value = 0x5,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+ },
+ .err_clear = {
+ .enable = false,
+ },
+ },
+};
+
+static struct cam_camnoc_specific
+ cam_cpas100_camnoc_specific[] = {
+ {
+ .port_type = CAM_CAMNOC_CDM,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+ .value = 0x22222222,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+ .value = 0x22222222,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+ .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+ .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+ .value = 0,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = false,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IFE02,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+ .value = 0x66665433,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+ .value = 0x66666666,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+ /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+ .value = 0xFFFFFF00,
+ },
+ .safe_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+ .value = 0x3,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IFE13,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+ .value = 0x66665433,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+ .value = 0x66666666,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+ /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+ .value = 0xFFFFFF00,
+ },
+ .safe_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+ .value = 0x3,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+ .value = 0x33333333,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+ .value = 0x33333333,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+ /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+ .mask = 0x7,
+ /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+ .shift = 0x0,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+ .value = 0x33333333,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+ .value = 0x33333333,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+ /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_JPEG,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+ .value = 0x22222222,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+ .value = 0x22222222,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+ .value = 0x22,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = false,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_FD,
+ .enable = false,
+ },
+ {
+ .port_type = CAM_CAMNOC_ICP,
+ .enable = false,
+ }
+};
+
+static struct cam_camnoc_err_logger_info cam170_cpas100_err_logger_offsets = {
+ .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+ .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+ .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+ .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+ .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+ .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+ .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+ .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = {
+ .camnoc_flush_slave_pending_trans = {
+ .enable = true,
+ .data.reg_info = {
+ .access_type = CAM_REG_TYPE_READ,
+ .offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+ .mask = 0xE0000, /* Bits 17, 18, 19 */
+ .value = 0, /* expected to be 0 */
+ },
+ },
+};
+
+/*
+ * Aggregated CAMNOC configuration for Titan 170 / CPAS v1.0.0; selected
+ * by cam_cpastop_init_hw_version() for CAM_CPAS_TITAN_170_V100.
+ */
+struct cam_camnoc_info cam170_cpas100_camnoc_info = {
+	.specific = &cam_cpas100_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas100_camnoc_specific) /
+		sizeof(cam_cpas100_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas100_irq_sbm,
+	.irq_err = &cam_cpas100_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas100_irq_err) /
+		sizeof(cam_cpas100_irq_err[0]),
+	.err_logger = &cam170_cpas100_err_logger_offsets,
+	.errata_wa_list = &cam170_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP100_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v170_110.h b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v170_110.h
new file mode 100644
index 000000000000..0c7c799a9120
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v170_110.h
@@ -0,0 +1,555 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CPASTOP_V170_110_H_
+#define _CPASTOP_V170_110_H_
+
+#define TEST_IRQ_ENABLE 0
+#define TCSR_CONN_RESET 0x0
+#define TCSR_CONN_SET 0x3
+
+static struct cam_camnoc_irq_sbm cam_cpas110_irq_sbm = {
+ .sbm_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+ .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+ 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+ 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+ 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+ 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+ 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+ (TEST_IRQ_ENABLE ?
+ 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+ 0x0),
+ },
+ .sbm_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+ },
+ .sbm_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+ .value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+ }
+};
+
+static struct cam_camnoc_irq_err
+ cam_cpas110_irq_err[] = {
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+ .enable = true,
+ .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x1190,
+ /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+ .enable = true,
+ .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+ .value = 0x1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+ },
+ .err_clear = {
+ .enable = false,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+ .enable = false,
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+ .enable = false,
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+ .enable = TEST_IRQ_ENABLE ? true : false,
+ .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+ .value = 0x5,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+ },
+ .err_clear = {
+ .enable = false,
+ },
+ },
+};
+
+static struct cam_camnoc_specific
+ cam_cpas110_camnoc_specific[] = {
+ {
+ .port_type = CAM_CAMNOC_CDM,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+ .value = 0x22222222,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+ .value = 0x22222222,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+ .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+ .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+ .value = 0x2,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = false,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IFE02,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+ .value = 0x66666543,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+ .value = 0x66666666,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+ /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+ .value = 0xFFFFFF00,
+ },
+ .safe_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+ .value = 0x1,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IFE13,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+ .value = 0x66666543,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+ .value = 0x66666666,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+ /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+ .value = 0xFFFFFF00,
+ },
+ .safe_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+ .value = 0x1,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+ .value = 0x33333333,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+ .value = 0x33333333,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+ /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+ .mask = 0x7,
+ /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+ .shift = 0x0,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+ .value = 0x33333333,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+ .value = 0x33333333,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+ /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_JPEG,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+ .value = 0x22222222,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+ .value = 0x22222222,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+ .value = 0x22,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = false,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_FD,
+ .enable = false,
+ },
+ {
+ .port_type = CAM_CAMNOC_ICP,
+ .enable = true,
+ .flag_out_set0_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_WRITE,
+ .masked_value = 0,
+ .offset = 0x2088,
+ .value = 0x100000,
+ },
+ },
+};
+
+static struct cam_camnoc_err_logger_info cam170_cpas110_err_logger_offsets = {
+ .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+ .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+ .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+ .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+ .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+ .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+ .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+ .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = {
+ .camnoc_flush_slave_pending_trans = {
+ .enable = false,
+ .data.reg_info = {
+ .access_type = CAM_REG_TYPE_READ,
+ .offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+ .mask = 0xE0000, /* Bits 17, 18, 19 */
+ .value = 0, /* expected to be 0 */
+ },
+ },
+ .tcsr_reg = {
+ .tcsr_conn_box_spare_0 = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xB3E4,
+ },
+ },
+};
+
+static struct cam_camnoc_info cam170_cpas110_camnoc_info = {
+ .specific = &cam_cpas110_camnoc_specific[0],
+ .specific_size = sizeof(cam_cpas110_camnoc_specific) /
+ sizeof(cam_cpas110_camnoc_specific[0]),
+ .irq_sbm = &cam_cpas110_irq_sbm,
+ .irq_err = &cam_cpas110_irq_err[0],
+ .irq_err_size = sizeof(cam_cpas110_irq_err) /
+ sizeof(cam_cpas110_irq_err[0]),
+ .err_logger = &cam170_cpas110_err_logger_offsets,
+ .errata_wa_list = &cam170_cpas110_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V170_110_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v175_100.h b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v175_100.h
new file mode 100644
index 000000000000..8673873d36fc
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v175_100.h
@@ -0,0 +1,565 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CPASTOP_V175_100_H_
+#define _CPASTOP_V175_100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v175_100_irq_sbm = {
+ .sbm_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+ .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+ 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+ 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+ 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+ 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+ 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+ (TEST_IRQ_ENABLE ?
+ 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+ 0x0),
+ },
+ .sbm_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+ },
+ .sbm_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+ .value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+ }
+};
+
+static struct cam_camnoc_irq_err
+ cam_cpas_v175_100_irq_err[] = {
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+ .enable = true,
+ .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x1190,
+ /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+ .enable = true,
+ .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+ .value = 0x1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+ },
+ .err_clear = {
+ .enable = false,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+ .enable = false,
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+ .enable = false,
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+ .enable = TEST_IRQ_ENABLE ? true : false,
+ .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+ .value = 0x5,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+ },
+ .err_clear = {
+ .enable = false,
+ },
+ },
+};
+
+static struct cam_camnoc_specific
+ cam_cpas_v175_100_camnoc_specific[] = {
+ {
+ .port_type = CAM_CAMNOC_CDM,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+ .value = 0x22222222,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+ .value = 0x22222222,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+ .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+ .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+ .value = 0x2,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = false,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IFE02,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+ .value = 0x66666543,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+ .value = 0x66666666,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+ /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+ .value = 0xFFFFFF00,
+ },
+ .safe_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+ .value = 0x1,
+ },
+ .ubwc_ctl = {
+ /*
+ * Do not explicitly set ubwc config register.
+ * Power on default values are taking care of required
+ * register settings.
+ */
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IFE13,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+ .value = 0x66666543,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+ .value = 0x66666666,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+ /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+ .value = 0xFFFFFF00,
+ },
+ .safe_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+ .value = 0x1,
+ },
+ .ubwc_ctl = {
+ /*
+ * Do not explicitly set ubwc config register.
+ * Power on default values are taking care of required
+ * register settings.
+ */
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+ .value = 0x33333333,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+ .value = 0x33333333,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+ /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+ .mask = 0x7,
+ /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+ .shift = 0x0,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ /*
+ * Do not explicitly set ubwc config register.
+ * Power on default values are taking care of required
+ * register settings.
+ */
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+ .value = 0x33333333,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+ .value = 0x33333333,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+ /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ /*
+ * Do not explicitly set ubwc config register.
+ * Power on default values are taking care of required
+ * register settings.
+ */
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_JPEG,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+ .value = 0x22222222,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+ .value = 0x22222222,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+ .value = 0x22,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = false,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_FD,
+ .enable = false,
+ },
+ {
+ .port_type = CAM_CAMNOC_ICP,
+ .enable = true,
+ .flag_out_set0_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_WRITE,
+ .masked_value = 0,
+ .offset = 0x2088,
+ .value = 0x100000,
+ },
+ },
+};
+
+static struct cam_camnoc_err_logger_info cam175_cpas100_err_logger_offsets = {
+ .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+ .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+ .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+ .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+ .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+ .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+ .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+ .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam175_cpas100_errata_wa_list = {
+ .camnoc_flush_slave_pending_trans = {
+ .enable = false,
+ .data.reg_info = {
+ .access_type = CAM_REG_TYPE_READ,
+ .offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+ .mask = 0xE0000, /* Bits 17, 18, 19 */
+ .value = 0, /* expected to be 0 */
+ },
+ },
+};
+
+static struct cam_camnoc_info cam175_cpas100_camnoc_info = {
+ .specific = &cam_cpas_v175_100_camnoc_specific[0],
+ .specific_size = sizeof(cam_cpas_v175_100_camnoc_specific) /
+ sizeof(cam_cpas_v175_100_camnoc_specific[0]),
+ .irq_sbm = &cam_cpas_v175_100_irq_sbm,
+ .irq_err = &cam_cpas_v175_100_irq_err[0],
+ .irq_err_size = sizeof(cam_cpas_v175_100_irq_err) /
+ sizeof(cam_cpas_v175_100_irq_err[0]),
+ .err_logger = &cam175_cpas100_err_logger_offsets,
+ .errata_wa_list = &cam175_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V175_100_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v175_101.h b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v175_101.h
new file mode 100644
index 000000000000..d506cc5fd4f5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/cpas_top/cpastop_v175_101.h
@@ -0,0 +1,565 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CPASTOP_V175_101_H_
+#define _CPASTOP_V175_101_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v175_101_irq_sbm = {
+ .sbm_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+ .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+ 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+ 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+ 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+ 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+ 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+ (TEST_IRQ_ENABLE ?
+ 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+ 0x0),
+ },
+ .sbm_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+ },
+ .sbm_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+ .value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+ }
+};
+
+static struct cam_camnoc_irq_err
+ cam_cpas_v175_101_irq_err[] = {
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+ .enable = true,
+ .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x1190,
+ /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+ .enable = true,
+ .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+ .value = 0x1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+ },
+ .err_clear = {
+ .enable = false,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+ .enable = false,
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+ .enable = false,
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+ .enable = TEST_IRQ_ENABLE ? true : false,
+ .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+ .value = 0x5,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+ },
+ .err_clear = {
+ .enable = false,
+ },
+ },
+};
+
+static struct cam_camnoc_specific
+ cam_cpas_v175_101_camnoc_specific[] = {
+ {
+ .port_type = CAM_CAMNOC_CDM,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+ .value = 0x22222222,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+ .value = 0x22222222,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+ .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+ .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+ .value = 0x2,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = false,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IFE02,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+ .value = 0x66666543,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+ .value = 0x66666666,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+ /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+ .value = 0xFFFFFF00,
+ },
+ .safe_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+ .value = 0x1,
+ },
+ .ubwc_ctl = {
+ /*
+ * Do not explicitly set ubwc config register.
+ * Power on default values are taking care of required
+ * register settings.
+ */
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IFE13,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+ .value = 0x66666543,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+ .value = 0x66666666,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+ /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+ .value = 0xFFFFFF00,
+ },
+ .safe_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+ .value = 0x1,
+ },
+ .ubwc_ctl = {
+ /*
+ * Do not explicitly set ubwc config register.
+ * Power on default values are taking care of required
+ * register settings.
+ */
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+ .value = 0x33333333,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+ .value = 0x33333333,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+ /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+ .mask = 0x7,
+ /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+ .shift = 0x0,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ /*
+ * Do not explicitly set ubwc config register.
+ * Power on default values are taking care of required
+ * register settings.
+ */
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+ .value = 0x33333333,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+ .value = 0x33333333,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+ /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ /*
+ * Do not explicitly set ubwc config register.
+ * Power on default values are taking care of required
+ * register settings.
+ */
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_JPEG,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+ .value = 0x22222222,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+ .value = 0x22222222,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+ .value = 0x22,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = false,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_FD,
+ .enable = false,
+ },
+ {
+ .port_type = CAM_CAMNOC_ICP,
+ .enable = true,
+ .flag_out_set0_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_WRITE,
+ .masked_value = 0,
+ .offset = 0x2088,
+ .value = 0x100000,
+ },
+ },
+};
+
+static struct cam_camnoc_err_logger_info cam175_cpas101_err_logger_offsets = {
+ .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+ .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+ .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+ .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+ .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+ .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+ .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+ .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam175_cpas101_errata_wa_list = {
+ .camnoc_flush_slave_pending_trans = {
+ .enable = false,
+ .data.reg_info = {
+ .access_type = CAM_REG_TYPE_READ,
+ .offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+ .mask = 0xE0000, /* Bits 17, 18, 19 */
+ .value = 0, /* expected to be 0 */
+ },
+ },
+};
+
+static struct cam_camnoc_info cam175_cpas101_camnoc_info = {
+ .specific = &cam_cpas_v175_101_camnoc_specific[0],
+ .specific_size = sizeof(cam_cpas_v175_101_camnoc_specific) /
+ sizeof(cam_cpas_v175_101_camnoc_specific[0]),
+ .irq_sbm = &cam_cpas_v175_101_irq_sbm,
+ .irq_err = &cam_cpas_v175_101_irq_err[0],
+ .irq_err_size = sizeof(cam_cpas_v175_101_irq_err) /
+ sizeof(cam_cpas_v175_101_irq_err[0]),
+ .err_logger = &cam175_cpas101_err_logger_offsets,
+ .errata_wa_list = &cam175_cpas101_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V175_101_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera_v3/cam_cpas/include/cam_cpas_api.h
new file mode 100644
index 000000000000..5dbd94a1fa9b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_cpas/include/cam_cpas_api.h
@@ -0,0 +1,527 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_API_H_
+#define _CAM_CPAS_API_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <media/cam_cpas.h>
+#include "cam_soc_util.h"
+
+#define CAM_HW_IDENTIFIER_LENGTH 128
+
+/* Default AXI Bandwidth vote */
+#define CAM_CPAS_DEFAULT_AXI_BW 1024
+
+/**
+ * enum cam_cpas_reg_base - Enum for register base identifier. These
+ * are the identifiers used in generic register
+ * write/read APIs provided by cpas driver.
+ */
+enum cam_cpas_reg_base {
+ CAM_CPAS_REG_CPASTOP,
+ CAM_CPAS_REG_CAMNOC,
+ CAM_CPAS_REG_CSR_TCSR,
+ CAM_CPAS_REG_CAMSS,
+ CAM_CPAS_REG_MAX
+};
+
+/**
+ * enum cam_cpas_hw_version - Enum for Titan CPAS HW Versions
+ */
+enum cam_cpas_hw_version {
+ CAM_CPAS_TITAN_NONE = 0,
+ CAM_CPAS_TITAN_150_V100 = 0x150100,
+ CAM_CPAS_TITAN_170_V100 = 0x170100,
+ CAM_CPAS_TITAN_170_V110 = 0x170110,
+ CAM_CPAS_TITAN_170_V120 = 0x170120,
+ CAM_CPAS_TITAN_175_V100 = 0x175100,
+ CAM_CPAS_TITAN_175_V101 = 0x175101,
+ CAM_CPAS_TITAN_175_V120 = 0x175120,
+ CAM_CPAS_TITAN_MAX
+};
+
+
+/**
+ * enum cam_camnoc_irq_type - Enum for camnoc irq types
+ *
+ * @CAM_CAMNOC_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ * 1 QHB port) has an error logger. The error
+ * observed at any slave port is logged into
+ * the error logger register and an IRQ is
+ * triggered
+ * @CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR : Triggered if any error detected
+ * in the IFE0 UBWC encoder instance
+ * @CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR : Triggered if any error detected
+ * in the IFE1 or IFE3 UBWC encoder
+ * instance
+ * @CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR : Triggered if any error detected
+ * in the IFE0 UBWC encoder instance
+ * @CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR : Triggered if any error
+ *                                              detected in the IFE1 UBWC
+ *                                              encoder instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error detected
+ * in the IPE/BPS UBWC decoder
+ * instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error detected
+ * in the IPE/BPS UBWC encoder
+ * instance
+ * @CAM_CAMNOC_IRQ_AHB_TIMEOUT : Triggered when the QHS_ICP slave
+ * times out after 4000 AHB cycles
+ */
+enum cam_camnoc_irq_type {
+ CAM_CAMNOC_IRQ_SLAVE_ERROR,
+ CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+ CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+ CAM_CAMNOC_IRQ_IFE0_UBWC_ENCODE_ERROR,
+ CAM_CAMNOC_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR,
+ CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+ CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+ CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+};
+
+/**
+ * struct cam_camnoc_irq_slave_err_data : Data for Slave error.
+ *
+ * @mainctrl : Err logger mainctrl info
+ * @errvld : Err logger errvld info
+ * @errlog0_low : Err logger errlog0_low info
+ * @errlog0_high : Err logger errlog0_high info
+ * @errlog1_low : Err logger errlog1_low info
+ * @errlog1_high : Err logger errlog1_high info
+ * @errlog2_low : Err logger errlog2_low info
+ * @errlog2_high : Err logger errlog2_high info
+ * @errlog3_low : Err logger errlog3_low info
+ * @errlog3_high : Err logger errlog3_high info
+ *
+ */
+struct cam_camnoc_irq_slave_err_data {
+ union {
+ struct {
+ uint32_t stall_en : 1; /* bit 0 */
+ uint32_t fault_en : 1; /* bit 1 */
+ uint32_t rsv : 30; /* bits 2-31 */
+ };
+ uint32_t value;
+ } mainctrl;
+ union {
+ struct {
+ uint32_t err_vld : 1; /* bit 0 */
+ uint32_t rsv : 31; /* bits 1-31 */
+ };
+ uint32_t value;
+ } errvld;
+ union {
+ struct {
+ uint32_t loginfo_vld : 1; /* bit 0 */
+ uint32_t word_error : 1; /* bit 1 */
+ uint32_t non_secure : 1; /* bit 2 */
+ uint32_t device : 1; /* bit 3 */
+ uint32_t opc : 3; /* bits 4 - 6 */
+ uint32_t rsv0 : 1; /* bit 7 */
+ uint32_t err_code : 3; /* bits 8 - 10 */
+ uint32_t sizef : 3; /* bits 11 - 13 */
+ uint32_t rsv1 : 2; /* bits 14 - 15 */
+ uint32_t addr_space : 6; /* bits 16 - 21 */
+ uint32_t rsv2 : 10; /* bits 22 - 31 */
+ };
+ uint32_t value;
+ } errlog0_low;
+ union {
+ struct {
+ uint32_t len1 : 10; /* bits 0 - 9 */
+ uint32_t rsv : 22; /* bits 10 - 31 */
+ };
+ uint32_t value;
+ } errlog0_high;
+ union {
+ struct {
+ uint32_t path : 16; /* bits 0 - 15 */
+ uint32_t rsv : 16; /* bits 16 - 31 */
+ };
+ uint32_t value;
+ } errlog1_low;
+ union {
+ struct {
+ uint32_t extid : 18; /* bits 0 - 17 */
+ uint32_t rsv : 14; /* bits 18 - 31 */
+ };
+ uint32_t value;
+ } errlog1_high;
+ union {
+ struct {
+ uint32_t errlog2_lsb : 32; /* bits 0 - 31 */
+ };
+ uint32_t value;
+ } errlog2_low;
+ union {
+ struct {
+ uint32_t errlog2_msb : 16; /* bits 0 - 15 */
+ uint32_t rsv : 16; /* bits 16 - 31 */
+ };
+ uint32_t value;
+ } errlog2_high;
+ union {
+ struct {
+ uint32_t errlog3_lsb : 32; /* bits 0 - 31 */
+ };
+ uint32_t value;
+ } errlog3_low;
+ union {
+ struct {
+ uint32_t errlog3_msb : 32; /* bits 0 - 31 */
+ };
+ uint32_t value;
+ } errlog3_high;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_enc_data : Data for UBWC Encode error.
+ *
+ * @encerr_status : Encode error status
+ *
+ */
+struct cam_camnoc_irq_ubwc_enc_data {
+ union {
+ struct {
+ uint32_t encerrstatus : 3; /* bits 0 - 2 */
+ uint32_t rsv : 29; /* bits 3 - 31 */
+ };
+ uint32_t value;
+ } encerr_status;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_dec_data : Data for UBWC Decode error.
+ *
+ * @decerr_status : Decoder error status
+ * @thr_err : Set to 1 if
+ * At least one of the bflc_len fields in the bit steam exceeds
+ * its threshold value. This error is possible only for
+ * RGBA1010102, TP10, and RGB565 formats
+ * @fcl_err : Set to 1 if
+ * Fast clear with a legal non-RGB format
+ * @len_md_err : Set to 1 if
+ * The calculated burst length does not match burst length
+ * specified by the metadata value
+ * @format_err : Set to 1 if
+ * Illegal format
+ * 1. bad format :2,3,6
+ * 2. For 32B MAL, metadata=6
+ * 3. For 32B MAL RGB565, Metadata != 0,1,7
+ * 4. For 64B MAL RGB565, metadata[3:1] == 1,2
+ *
+ */
+struct cam_camnoc_irq_ubwc_dec_data {
+ union {
+ struct {
+ uint32_t thr_err : 1; /* bit 0 */
+ uint32_t fcl_err : 1; /* bit 1 */
+ uint32_t len_md_err : 1; /* bit 2 */
+ uint32_t format_err : 1; /* bit 3 */
+ uint32_t rsv : 28; /* bits 4 - 31 */
+ };
+ uint32_t value;
+ } decerr_status;
+};
+
+struct cam_camnoc_irq_ahb_timeout_data {
+ uint32_t data;
+};
+
+/**
+ * struct cam_cpas_irq_data : CAMNOC IRQ data
+ *
+ * @irq_type : To identify the type of IRQ
+ * @u : Union of irq err data information
+ * @slave_err : Data for Slave error.
+ * Valid if type is CAM_CAMNOC_IRQ_SLAVE_ERROR
+ * @enc_err : Data for UBWC Encode error.
+ * Valid if type is one of below:
+ * CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR
+ * CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR
+ * CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR
+ * @dec_err : Data for UBWC Decode error.
+ * Valid if type is CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR
+ * @ahb_err : Data for AHB timeout error.
+ * Valid if type is CAM_CAMNOC_IRQ_AHB_TIMEOUT
+ *
+ */
+struct cam_cpas_irq_data {
+ enum cam_camnoc_irq_type irq_type;
+ union {
+ struct cam_camnoc_irq_slave_err_data slave_err;
+ struct cam_camnoc_irq_ubwc_enc_data enc_err;
+ struct cam_camnoc_irq_ubwc_dec_data dec_err;
+ struct cam_camnoc_irq_ahb_timeout_data ahb_err;
+ } u;
+};
+
+/**
+ * struct cam_cpas_register_params : Register params for cpas client
+ *
+ * @identifier : Input identifier string which is the device label
+ * from dt like vfe, ife, jpeg etc
+ * @cell_index : Input integer identifier pointing to the cell index
+ * from dt of the device. This can be used to form a
+ * unique string with @identifier like vfe0, ife1,
+ * jpeg0, etc
+ * @dev : device handle
+ * @userdata : Input private data which will be passed as
+ * an argument while callback.
+ * @cam_cpas_client_cb : Input callback pointer for triggering the
+ *                       callbacks from CPAS driver.
+ * @client_handle : CPAS client handle
+ * @userdata : User data given at the time of register
+ * @event_type : event type
+ * @event_data : event data
+ * @client_handle : Output Unique handle generated for this register
+ *
+ */
+struct cam_cpas_register_params {
+ char identifier[CAM_HW_IDENTIFIER_LENGTH];
+ uint32_t cell_index;
+ struct device *dev;
+ void *userdata;
+ bool (*cam_cpas_client_cb)(
+ uint32_t client_handle,
+ void *userdata,
+ struct cam_cpas_irq_data *irq_data);
+ uint32_t client_handle;
+};
+
+/**
+ * enum cam_vote_type - Enum for voting type
+ *
+ * @CAM_VOTE_ABSOLUTE : Absolute vote
+ * @CAM_VOTE_DYNAMIC : Dynamic vote
+ */
+enum cam_vote_type {
+ CAM_VOTE_ABSOLUTE,
+ CAM_VOTE_DYNAMIC,
+};
+
+/**
+ * struct cam_ahb_vote : AHB vote
+ *
+ * @type : AHB voting type.
+ * CAM_VOTE_ABSOLUTE : vote based on the value 'level' is set
+ * CAM_VOTE_DYNAMIC : vote calculated dynamically using 'freq'
+ * and 'dev' handle is set
+ * @level : AHB vote level
+ * @freq : AHB vote dynamic frequency
+ *
+ */
+struct cam_ahb_vote {
+ enum cam_vote_type type;
+ union {
+ enum cam_vote_level level;
+ unsigned long freq;
+ } vote;
+};
+
+/**
+ * struct cam_axi_vote : AXI vote
+ *
+ * @uncompressed_bw : Bus bandwidth required in Bytes for uncompressed data
+ * This is the required bandwidth for uncompressed
+ * data traffic between hw core and camnoc.
+ * @compressed_bw : Bus bandwidth required in Bytes for compressed data.
+ * This is the required bandwidth for compressed
+ * data traffic between camnoc and mmnoc.
+ *
+ * If one of the above is not applicable to a hw client, it has to
+ * fill the same values in both.
+ *
+ */
+struct cam_axi_vote {
+ uint64_t uncompressed_bw;
+ uint64_t compressed_bw;
+};
+
+/**
+ * cam_cpas_register_client()
+ *
+ * @brief: API to register cpas client
+ *
+ * @register_params: Input params to register as a client to CPAS
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_register_client(
+ struct cam_cpas_register_params *register_params);
+
+/**
+ * cam_cpas_unregister_client()
+ *
+ * @brief: API to unregister cpas client
+ *
+ * @client_handle: Client handle to be unregistered
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_unregister_client(uint32_t client_handle);
+
+/**
+ * cam_cpas_start()
+ *
+ * @brief: API to start cpas client hw. Clients have to vote for minimal
+ * bandwidth requirements for AHB, AXI. Use cam_cpas_update_ahb_vote
+ * to scale bandwidth after start.
+ *
+ * @client_handle: client cpas handle
+ * @ahb_vote : Pointer to ahb vote info
+ * @axi_vote : Pointer to axi bandwidth vote info
+ *
+ * If AXI vote is not applicable to a particular client, use the value exposed
+ * by CAM_CPAS_DEFAULT_AXI_BW as the default vote request.
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_start(
+ uint32_t client_handle,
+ struct cam_ahb_vote *ahb_vote,
+ struct cam_axi_vote *axi_vote);
+
+/**
+ * cam_cpas_stop()
+ *
+ * @brief: API to stop cpas client hw. Bandwidth for AHB, AXI votes
+ * would be removed for this client on this call. Clients should not
+ * use cam_cpas_update_ahb_vote or cam_cpas_update_axi_vote
+ * to remove their bandwidth vote.
+ *
+ * @client_handle: client cpas handle
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_stop(uint32_t client_handle);
+
+/**
+ * cam_cpas_update_ahb_vote()
+ *
+ * @brief: API to update AHB vote requirement. Use this function only
+ * between cam_cpas_start and cam_cpas_stop in case clients wants
+ * to scale to different vote level. Do not use this function to de-vote,
+ * removing client's vote is implicit on cam_cpas_stop
+ *
+ * @client_handle : Client cpas handle
+ * @ahb_vote : Pointer to ahb vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_ahb_vote(
+ uint32_t client_handle,
+ struct cam_ahb_vote *ahb_vote);
+
+/**
+ * cam_cpas_update_axi_vote()
+ *
+ * @brief: API to update AXI vote requirement. Use this function only
+ * between cam_cpas_start and cam_cpas_stop in case clients wants
+ * to scale to different vote level. Do not use this function to de-vote,
+ * removing client's vote is implicit on cam_cpas_stop
+ *
+ * @client_handle : Client cpas handle
+ * @axi_vote : Pointer to axi bandwidth vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_axi_vote(
+ uint32_t client_handle,
+ struct cam_axi_vote *axi_vote);
+
+/**
+ * cam_cpas_reg_write()
+ *
+ * @brief: API to write a register value in CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base : Register base identifier
+ * @offset : Offset from the register base address
+ * @mb : Whether to do reg write with memory barrier
+ * @value : Value to be written in register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_write(
+ uint32_t client_handle,
+ enum cam_cpas_reg_base reg_base,
+ uint32_t offset,
+ bool mb,
+ uint32_t value);
+
+/**
+ * cam_cpas_reg_read()
+ *
+ * @brief: API to read a register value from CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base : Register base identifier
+ * @offset : Offset from the register base address
+ * @mb : Whether to do reg read with memory barrier
+ * @value : Value to be read from register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_read(
+ uint32_t client_handle,
+ enum cam_cpas_reg_base reg_base,
+ uint32_t offset,
+ bool mb,
+ uint32_t *value);
+
+/**
+ * cam_cpas_get_hw_info()
+ *
+ * @brief: API to get camera hw information
+ *
+ * @camera_family : Camera family type. One of
+ * CAM_FAMILY_CAMERA_SS
+ * CAM_FAMILY_CPAS_SS
+ * @camera_version : Camera platform version
+ * @cpas_version : Camera cpas version
+ * @cam_caps : Camera capability
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_get_hw_info(
+ uint32_t *camera_family,
+ struct cam_hw_version *camera_version,
+ struct cam_hw_version *cpas_version,
+ uint32_t *cam_caps);
+
+/**
+ * cam_cpas_get_cpas_hw_version()
+ *
+ * @brief: API to get camera cpas hw version
+ *
+ * @hw_version : Camera cpas hw version
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_get_cpas_hw_version(
+ uint32_t *hw_version);
+
+#endif /* _CAM_CPAS_API_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/Makefile b/drivers/media/platform/msm/camera_v3/cam_fd/Makefile
new file mode 100644
index 000000000000..ed896e511963
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_fd
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += fd_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_dev.o cam_fd_context.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_context.c
new file mode 100644
index 000000000000..99c509c62809
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_context.c
@@ -0,0 +1,256 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_debug_util.h"
+#include "cam_fd_context.h"
+#include "cam_trace.h"
+
+static const char fd_dev_name[] = "fd";
+
+/* Functions in Available state */
+/*
+ * Acquire ioctl handler valid only in the Available state. Delegates the
+ * acquire to the generic context/HW helper; on success transitions the
+ * context to ACQUIRED and emits a state trace event.
+ */
+static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Acquire dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+/* Functions in Acquired state */
+/*
+ * Release ioctl handler valid in the Acquired state. Releases HW resources
+ * through the generic helper and returns the context to AVAILABLE.
+ */
+static int __cam_fd_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_AVAILABLE;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+/*
+ * Config ioctl handler in the Acquired state: hands the packet to the
+ * generic prepare helper. No state transition on success.
+ */
+static int __cam_fd_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * Start ioctl handler in the Acquired state: starts the device via the
+ * generic helper and transitions the context to ACTIVATED.
+ */
+static int __cam_fd_ctx_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Start dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACTIVATED;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+/* Functions in Activated state */
+/*
+ * Stop ioctl handler in the Activated state. @cmd is accepted for the
+ * ioctl-ops signature but unused (may legitimately be NULL — see the
+ * release-in-activated path). Moves the context back to ACQUIRED.
+ */
+static int __cam_fd_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+/*
+ * Release ioctl handler in the Activated state: first stops the device
+ * (passing NULL for the unused stop cmd), then reuses the Acquired-state
+ * release handler. Net transition on success: ACTIVATED -> AVAILABLE.
+ */
+static int __cam_fd_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = __cam_fd_ctx_stop_dev_in_activated(ctx, NULL);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc);
+		return rc;
+	}
+
+	rc = __cam_fd_ctx_release_dev_in_acquired(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * Flush ioctl handler in the Activated state: forwards the flush request
+ * to the HW layer via the generic context helper. No state change.
+ */
+static int __cam_fd_ctx_flush_dev_in_activated(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		/* Log with the FD tag; CAM_ICP was a copy/paste slip */
+		CAM_ERR(CAM_FD, "Failed to flush device, rc=%d", rc);
+
+	return rc;
+}
+
+/*
+ * Config ioctl handler in the Activated state: identical flow to the
+ * Acquired-state config — packets may keep arriving while streaming.
+ */
+static int __cam_fd_ctx_config_dev_in_activated(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * IRQ event handler in the Activated state: routes HW buf-done events to
+ * the generic buf-done helper. @context is the cam_context pointer.
+ */
+static int __cam_fd_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc;
+
+	rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in buf done, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * Top-level state machine: per-state ioctl/CRM/IRQ dispatch table, indexed
+ * by the context state (slot labels below). States with empty ops reject
+ * the corresponding operations; only Activated installs an IRQ handler.
+ */
+static struct cam_ctx_ops
+	cam_fd_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_fd_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_fd_ctx_release_dev_in_acquired,
+			.config_dev = __cam_fd_ctx_config_dev_in_acquired,
+			.start_dev = __cam_fd_ctx_start_dev_in_acquired,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = { },
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_fd_ctx_stop_dev_in_activated,
+			.release_dev = __cam_fd_ctx_release_dev_in_activated,
+			.config_dev = __cam_fd_ctx_config_dev_in_activated,
+			.flush_dev = __cam_fd_ctx_flush_dev_in_activated,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_fd_ctx_handle_irq_in_activated,
+	},
+};
+
+
+/*
+ * Initialize an FD context: zeroes @fd_ctx, initializes the underlying
+ * base context with this file's state machine and the given HW manager
+ * interface, and cross-links fd_ctx <-> base_ctx.
+ * Returns 0 on success or the base-init error code.
+ */
+int cam_fd_context_init(struct cam_fd_context *fd_ctx,
+	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id)
+{
+	int rc;
+
+	if (!base_ctx || !fd_ctx) {
+		CAM_ERR(CAM_FD, "Invalid Context %pK %pK", base_ctx, fd_ctx);
+		return -EINVAL;
+	}
+
+	memset(fd_ctx, 0, sizeof(*fd_ctx));
+
+	rc = cam_context_init(base_ctx, fd_dev_name, CAM_FD, ctx_id,
+		NULL, hw_intf, fd_ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Camera Context Base init failed, rc=%d", rc);
+		return rc;
+	}
+
+	fd_ctx->base = base_ctx;
+	base_ctx->ctx_priv = fd_ctx;
+	base_ctx->state_machine = cam_fd_ctx_state_machine;
+
+	return rc;
+}
+
+/*
+ * Tear down an FD context: deinitializes the base context and clears the
+ * whole fd_ctx structure. Returns the base-deinit result (the memset is
+ * performed even if deinit reported an error).
+ */
+int cam_fd_context_deinit(struct cam_fd_context *fd_ctx)
+{
+	int rc = 0;
+
+	if (!fd_ctx || !fd_ctx->base) {
+		CAM_ERR(CAM_FD, "Invalid inputs %pK", fd_ctx);
+		return -EINVAL;
+	}
+
+	rc = cam_context_deinit(fd_ctx->base);
+	if (rc)
+		CAM_ERR(CAM_FD, "Error in base deinit, rc=%d", rc);
+
+	memset(fd_ctx, 0, sizeof(*fd_ctx));
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_context.h b/drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_context.h
new file mode 100644
index 000000000000..a8b5d159f265
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_context.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_CONTEXT_H_
+#define _CAM_FD_CONTEXT_H_
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+
+/**
+ * struct cam_fd_context - Face Detection context information
+ *
+ * @base : Base context pointer for this FD context
+ * @req_base : List of base requests for this FD context
+ */
+struct cam_fd_context {
+ struct cam_context *base;
+ struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+};
+
+/* Initialize @fd_ctx on top of @base_ctx with the given HW manager
+ * interface and context id. Returns 0 on success.
+ */
+int cam_fd_context_init(struct cam_fd_context *fd_ctx,
+	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id);
+/* Tear down a context previously set up by cam_fd_context_init(). */
+int cam_fd_context_deinit(struct cam_fd_context *ctx);
+
+#endif /* _CAM_FD_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_dev.c b/drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_dev.c
new file mode 100644
index 000000000000..d5068ca26971
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/cam_fd_dev.c
@@ -0,0 +1,214 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_fd_context.h"
+#include "cam_fd_hw_mgr.h"
+#include "cam_fd_hw_mgr_intf.h"
+
+#define CAM_FD_DEV_NAME "cam-fd"
+
+/**
+ * struct cam_fd_dev - FD device information
+ *
+ * @sd: Subdev information
+ * @base_ctx: List of base contexts
+ * @fd_ctx: List of FD contexts
+ * @lock: Mutex handle
+ * @open_cnt: FD subdev open count
+ * @probe_done: Whether FD probe is completed
+ */
+struct cam_fd_dev {
+ struct cam_subdev sd;
+ struct cam_context base_ctx[CAM_CTX_MAX];
+ struct cam_fd_context fd_ctx[CAM_CTX_MAX];
+ struct mutex lock;
+ uint32_t open_cnt;
+ bool probe_done;
+};
+
+static struct cam_fd_dev g_fd_dev;
+
+/*
+ * v4l2 subdev open: rejects opens before probe completes, otherwise just
+ * bumps the open count under the device lock.
+ */
+static int cam_fd_dev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_fd_dev *fd_dev = &g_fd_dev;
+
+	if (!fd_dev->probe_done) {
+		CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev);
+		return -ENODEV;
+	}
+
+	mutex_lock(&fd_dev->lock);
+	fd_dev->open_cnt++;
+	CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt);
+	mutex_unlock(&fd_dev->lock);
+
+	return 0;
+}
+
+/*
+ * v4l2 subdev close: drops the open count and shuts down the cam_node.
+ *
+ * NOTE(review): open_cnt is decremented without an underflow guard, and
+ * cam_node_shutdown() runs on every close regardless of how many handles
+ * remain open — confirm this matches the intended lifetime model (other
+ * camera subdevs typically shut down only on the last close).
+ */
+static int cam_fd_dev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_fd_dev *fd_dev = &g_fd_dev;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	if (!fd_dev->probe_done) {
+		CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev);
+		return -ENODEV;
+	}
+
+	mutex_lock(&fd_dev->lock);
+	fd_dev->open_cnt--;
+	CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt);
+	mutex_unlock(&fd_dev->lock);
+
+	if (!node) {
+		CAM_ERR(CAM_FD, "Node ptr is NULL");
+		return -EINVAL;
+	}
+
+	cam_node_shutdown(node);
+
+	return 0;
+}
+
+static const struct v4l2_subdev_internal_ops cam_fd_subdev_internal_ops = {
+ .open = cam_fd_dev_open,
+ .close = cam_fd_dev_close,
+};
+
+/*
+ * Platform probe: creates the v4l2 subdev/cam_node, initializes the FD HW
+ * manager, sets up CAM_CTX_MAX FD contexts, and registers the node.
+ * On failure, already-initialized contexts are unwound in reverse order
+ * and the subdev is removed.
+ *
+ * NOTE(review): contexts are initialized with &node->hw_mgr_intf while
+ * cam_fd_hw_mgr_init() fills the local hw_mgr_intf that is only copied
+ * into the node by cam_node_init() afterwards — contexts hold a pointer,
+ * so this works only if they don't dereference it before cam_node_init();
+ * confirm.
+ */
+static int cam_fd_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+
+	g_fd_dev.sd.internal_ops = &cam_fd_subdev_internal_ops;
+
+	/* Initialze the v4l2 subdevice first. (create cam_node) */
+	rc = cam_subdev_probe(&g_fd_dev.sd, pdev, CAM_FD_DEV_NAME,
+		CAM_FD_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_FD, "FD cam_subdev_probe failed, rc=%d", rc);
+		return rc;
+	}
+	node = (struct cam_node *) g_fd_dev.sd.token;
+
+	rc = cam_fd_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in initializing FD HW manager, rc=%d",
+			rc);
+		goto unregister_subdev;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_fd_context_init(&g_fd_dev.fd_ctx[i],
+			&g_fd_dev.base_ctx[i], &node->hw_mgr_intf, i);
+		if (rc) {
+			CAM_ERR(CAM_FD, "FD context init failed i=%d, rc=%d",
+				i, rc);
+			goto deinit_ctx;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_fd_dev.base_ctx, CAM_CTX_MAX,
+		CAM_FD_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_FD, "FD node init failed, rc=%d", rc);
+		goto deinit_ctx;
+	}
+
+	mutex_init(&g_fd_dev.lock);
+	g_fd_dev.probe_done = true;
+
+	CAM_DBG(CAM_FD, "Camera FD probe complete");
+
+	return 0;
+
+deinit_ctx:
+	/* Unwind only the contexts that were successfully initialized */
+	for (--i; i >= 0; i--) {
+		if (cam_fd_context_deinit(&g_fd_dev.fd_ctx[i]))
+			CAM_ERR(CAM_FD, "FD context %d deinit failed", i);
+	}
+unregister_subdev:
+	if (cam_subdev_remove(&g_fd_dev.sd))
+		CAM_ERR(CAM_FD, "Failed in subdev remove");
+
+	return rc;
+}
+
+/*
+ * Platform remove: deinitializes all contexts, the HW manager, and the
+ * subdev; errors are logged but teardown continues. Returns the result
+ * of the last failing (or final) operation.
+ */
+static int cam_fd_dev_remove(struct platform_device *pdev)
+{
+	int i, rc;
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_fd_context_deinit(&g_fd_dev.fd_ctx[i]);
+		if (rc)
+			CAM_ERR(CAM_FD, "FD context %d deinit failed, rc=%d",
+				i, rc);
+	}
+
+	rc = cam_fd_hw_mgr_deinit(pdev->dev.of_node);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in hw mgr deinit, rc=%d", rc);
+
+	rc = cam_subdev_remove(&g_fd_dev.sd);
+	if (rc)
+		CAM_ERR(CAM_FD, "Unregister failed, rc=%d", rc);
+
+	mutex_destroy(&g_fd_dev.lock);
+	g_fd_dev.probe_done = false;
+
+	return rc;
+}
+
+static const struct of_device_id cam_fd_dt_match[] = {
+ {
+ .compatible = "qcom,cam-fd"
+ },
+ {}
+};
+
+static struct platform_driver cam_fd_driver = {
+ .probe = cam_fd_dev_probe,
+ .remove = cam_fd_dev_remove,
+ .driver = {
+ .name = "cam_fd",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_fd_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+/* Module entry: register the FD platform driver. */
+static int __init cam_fd_dev_init_module(void)
+{
+	return platform_driver_register(&cam_fd_driver);
+}
+
+/* Module exit: unregister the FD platform driver. */
+static void __exit cam_fd_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_fd_driver);
+}
+
+module_init(cam_fd_dev_init_module);
+module_exit(cam_fd_dev_exit_module);
+MODULE_DESCRIPTION("MSM FD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/Makefile b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/Makefile
new file mode 100644
index 000000000000..d6e76d18e799
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_fd
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += fd_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
new file mode 100644
index 000000000000..ec0a906cb99a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -0,0 +1,1943 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+#include "cam_packet_util.h"
+#include "cam_fd_context.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_fd_hw_mgr_intf.h"
+#include "cam_fd_hw_mgr.h"
+#include "cam_trace.h"
+
+static struct cam_fd_hw_mgr g_fd_hw_mgr;
+
+/*
+ * Validate a UMD packet for the FD HW manager: generic packet sanity,
+ * no patches / at least one io config, kmd buf index in range, opcode is
+ * FD_FRAME_UPDATE, and each non-empty cmd buffer has a known meta_data id
+ * and passes descriptor validation. Returns 0 if the packet is usable.
+ */
+static int cam_fd_mgr_util_packet_validate(struct cam_packet *packet)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc;
+
+	if (!packet)
+		return -EINVAL;
+
+	CAM_DBG(CAM_FD, "Packet request=%d, op_code=0x%x, size=%d, flags=%d",
+		packet->header.request_id, packet->header.op_code,
+		packet->header.size, packet->header.flags);
+	CAM_DBG(CAM_FD,
+		"Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)",
+		packet->cmd_buf_offset, packet->num_cmd_buf,
+		packet->io_configs_offset, packet->num_io_configs);
+	CAM_DBG(CAM_FD,
+		"Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)",
+		packet->patch_offset, packet->num_patches,
+		packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index);
+
+	if (cam_packet_util_validate_packet(packet)) {
+		CAM_ERR(CAM_FD, "invalid packet:%d %d %d %d %d",
+			packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	/* All buffers must come through io config, do not support patching */
+	/* NOTE(review): message prints num_cmd_buf/num_patches but the check
+	 * is on num_patches/num_io_configs — log may mislead; confirm.
+	 */
+	if (packet->num_patches || !packet->num_io_configs) {
+		CAM_ERR(CAM_FD, "wrong number of cmd/patch info: %u %u",
+			packet->num_cmd_buf, packet->num_patches);
+		return -EINVAL;
+	}
+
+	/* KMD Buf index can never be greater than or equal to num cmd bufs */
+	if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf) {
+		CAM_ERR(CAM_FD, "Invalid kmd index %d (%d)",
+			packet->kmd_cmd_buf_index, packet->num_cmd_buf);
+		return -EINVAL;
+	}
+
+	if ((packet->header.op_code & 0xff) !=
+		CAM_PACKET_OPCODES_FD_FRAME_UPDATE) {
+		CAM_ERR(CAM_FD, "Invalid op_code %u",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		/*
+		 * We can allow 0 length cmd buffer. This can happen in case
+		 * umd gives an empty cmd buffer as kmd buffer
+		 */
+		if (!cmd_desc[i].length)
+			continue;
+
+		if ((cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_GENERIC) &&
+			(cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)) {
+			CAM_ERR(CAM_FD, "Invalid meta_data [%d] %u", i,
+				cmd_desc[i].meta_data);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_FD,
+			"CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d",
+			i,
+			cmd_desc[i].mem_handle, cmd_desc[i].offset,
+			cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type,
+			cmd_desc[i].meta_data);
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc) {
+			CAM_ERR(CAM_FD, "Invalid cmd buffer %d", i);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Return a HW mgr context to @src_list (under ctx_mutex) and NULL the
+ * caller's pointer. A NULL *fd_ctx is tolerated. Always returns 0.
+ */
+static int cam_fd_mgr_util_put_ctx(
+	struct list_head *src_list,
+	struct cam_fd_hw_mgr_ctx **fd_ctx)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.ctx_mutex);
+	ctx_ptr = *fd_ctx;
+	if (ctx_ptr)
+		list_add_tail(&ctx_ptr->list, src_list);
+	*fd_ctx = NULL;
+	mutex_unlock(&g_fd_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+/*
+ * Pop the first HW mgr context from @src_list (under ctx_mutex) into
+ * *fd_ctx; *fd_ctx is set to NULL when the list is empty.
+ *
+ * NOTE(review): the empty-list path returns -1, not a -errno value —
+ * callers that only test non-zero are fine, but consider -ENOMEM/-EPERM
+ * for consistency with the frame_req variant below.
+ */
+static int cam_fd_mgr_util_get_ctx(
+	struct list_head *src_list,
+	struct cam_fd_hw_mgr_ctx **fd_ctx)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.ctx_mutex);
+	if (!list_empty(src_list)) {
+		ctx_ptr = list_first_entry(src_list,
+			struct cam_fd_hw_mgr_ctx, list);
+		list_del_init(&ctx_ptr->list);
+	} else {
+		CAM_ERR(CAM_FD, "No more free fd hw mgr ctx");
+		rc = -1;
+	}
+	*fd_ctx = ctx_ptr;
+	mutex_unlock(&g_fd_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+/*
+ * Return a frame request to @src_list (under frame_req_mutex) and NULL
+ * the caller's pointer. A NULL *frame_req is tolerated. Always returns 0.
+ */
+static int cam_fd_mgr_util_put_frame_req(
+	struct list_head *src_list,
+	struct cam_fd_mgr_frame_request **frame_req)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *req_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.frame_req_mutex);
+	req_ptr = *frame_req;
+	if (req_ptr)
+		list_add_tail(&req_ptr->list, src_list);
+	*frame_req = NULL;
+	mutex_unlock(&g_fd_hw_mgr.frame_req_mutex);
+
+	return rc;
+}
+
+/*
+ * Pop the first frame request from @src_list (under frame_req_mutex) into
+ * *frame_req. Returns -EPERM (debug log only — an empty list is a normal
+ * condition for the scheduler) and sets *frame_req to NULL when empty.
+ */
+static int cam_fd_mgr_util_get_frame_req(
+	struct list_head *src_list,
+	struct cam_fd_mgr_frame_request **frame_req)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *req_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.frame_req_mutex);
+	if (!list_empty(src_list)) {
+		req_ptr = list_first_entry(src_list,
+			struct cam_fd_mgr_frame_request, list);
+		list_del_init(&req_ptr->list);
+	} else {
+		CAM_DBG(CAM_FD, "Frame req not available");
+		rc = -EPERM;
+	}
+	*frame_req = req_ptr;
+	mutex_unlock(&g_fd_hw_mgr.frame_req_mutex);
+
+	return rc;
+}
+
+/*
+ * Resolve the cam_fd_device a context is bound to, via its device_index.
+ * Validates pointers and that device_index is within [0, CAM_FD_HW_MAX).
+ */
+static int cam_fd_mgr_util_get_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_fd_device **hw_device)
+{
+	if (!hw_mgr || !hw_ctx || !hw_device) {
+		CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx,
+			hw_device);
+		return -EINVAL;
+	}
+
+	if ((hw_ctx->device_index < 0) ||
+		(hw_ctx->device_index >= CAM_FD_HW_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid device indx %d", hw_ctx->device_index);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_FD, "ctx_index=%u, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	*hw_device = &hw_mgr->hw_device[hw_ctx->device_index];
+
+	return 0;
+}
+
+/*
+ * Release the HW bound to @hw_ctx: call the device's release op (if any),
+ * drop the device's context refcount under hw_mgr_mutex, and invalidate
+ * the context's device_index.
+ *
+ * A missing release op is only logged; the refcount is still dropped and
+ * rc stays 0 (set by the earlier get_device call).
+ */
+static int cam_fd_mgr_util_release_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx)
+{
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_release_args hw_release_args;
+	int rc;
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	if (hw_device->hw_intf->hw_ops.release) {
+		hw_release_args.hw_ctx = hw_ctx;
+		hw_release_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		rc = hw_device->hw_intf->hw_ops.release(
+			hw_device->hw_intf->hw_priv, &hw_release_args,
+			sizeof(hw_release_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW release %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid release function");
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	hw_device->num_ctxts--;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	hw_ctx->device_index = -1;
+
+	return rc;
+}
+
+/*
+ * Select and reserve a HW device for this context. Preference order:
+ * (1) an idle device (num_ctxts == 0) whose capabilities satisfy the
+ * requested mode / raw-results requirement, else (2) any capable device
+ * shared with other contexts. The chosen device's num_ctxts is bumped
+ * under hw_mgr_mutex during selection; it is rolled back if the reserve
+ * op fails or is missing, so failures no longer leak a context slot.
+ * On success, device_index and ctx_hw_private are recorded in @hw_ctx.
+ */
+static int cam_fd_mgr_util_select_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx,
+	struct cam_fd_acquire_dev_info *fd_acquire_args)
+{
+	int i, rc;
+	struct cam_fd_hw_reserve_args hw_reserve_args;
+	struct cam_fd_device *hw_device = NULL;
+
+	if (!hw_mgr || !hw_ctx || !fd_acquire_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx,
+			fd_acquire_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	/* Check if a device is free which can satisfy the requirements */
+	for (i = 0; i < hw_mgr->num_devices; i++) {
+		hw_device = &hw_mgr->hw_device[i];
+		CAM_DBG(CAM_FD,
+			"[%d] : num_ctxts=%d, modes=%d, raw_results=%d",
+			i, hw_device->num_ctxts,
+			hw_device->hw_caps.supported_modes,
+			hw_device->hw_caps.raw_results_available);
+		if ((hw_device->num_ctxts == 0) &&
+			(fd_acquire_args->mode &
+			hw_device->hw_caps.supported_modes) &&
+			(!fd_acquire_args->get_raw_results ||
+			hw_device->hw_caps.raw_results_available)) {
+			CAM_DBG(CAM_FD, "Found dedicated HW Index=%d", i);
+			hw_device->num_ctxts++;
+			break;
+		}
+	}
+
+	/*
+	 * We couldn't find a free HW which meets requirement, now check if
+	 * there is a HW which meets acquire requirements
+	 */
+	if (i == hw_mgr->num_devices) {
+		for (i = 0; i < hw_mgr->num_devices; i++) {
+			hw_device = &hw_mgr->hw_device[i];
+			if ((fd_acquire_args->mode &
+				hw_device->hw_caps.supported_modes) &&
+				(!fd_acquire_args->get_raw_results ||
+				hw_device->hw_caps.raw_results_available)) {
+				hw_device->num_ctxts++;
+				CAM_DBG(CAM_FD, "Found sharing HW Index=%d", i);
+				break;
+			}
+		}
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	if ((i == hw_mgr->num_devices) || !hw_device) {
+		CAM_ERR(CAM_FD, "Couldn't acquire HW %d %d",
+			fd_acquire_args->mode,
+			fd_acquire_args->get_raw_results);
+		return -EBUSY;
+	}
+
+	CAM_DBG(CAM_FD, "Device index %d selected for this acquire", i);
+
+	/* Check if we can reserve this HW */
+	if (hw_device->hw_intf->hw_ops.reserve) {
+		hw_reserve_args.hw_ctx = hw_ctx;
+		hw_reserve_args.mode = fd_acquire_args->mode;
+		rc = hw_device->hw_intf->hw_ops.reserve(
+			hw_device->hw_intf->hw_priv, &hw_reserve_args,
+			sizeof(hw_reserve_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW reserve %d", rc);
+			goto put_device;
+		}
+		hw_ctx->ctx_hw_private = hw_reserve_args.ctx_hw_private;
+	} else {
+		CAM_ERR(CAM_FD, "Invalid reserve function");
+		rc = -EPERM;
+		goto put_device;
+	}
+
+	/* Update required info in hw context */
+	hw_ctx->device_index = i;
+
+	CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	return 0;
+
+put_device:
+	/*
+	 * Roll back the num_ctxts ref taken during selection; without this
+	 * a failed reserve permanently consumes a context slot on the
+	 * device and it is never considered "dedicated" again.
+	 */
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	hw_device->num_ctxts--;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+/*
+ * Look up the i-th "compat-hw-name" child of @of_node and return its
+ * cam_hw_intf (stashed as the child platform device's drvdata). Validates
+ * that hw_idx fits within CAM_FD_HW_MAX. of_node_put() is a no-op on a
+ * NULL node, so the early goto before device_node is assigned is safe.
+ */
+static int cam_fd_mgr_util_pdev_get_hw_intf(struct device_node *of_node,
+	int i, struct cam_hw_intf **device_hw_intf)
+{
+	struct device_node *device_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *hw_intf = NULL;
+	const char *name = NULL;
+	int rc;
+
+	rc = of_property_read_string_index(of_node, "compat-hw-name", i, &name);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Getting dev object name failed %d %d", i, rc);
+		goto put_node;
+	}
+
+	device_node = of_find_node_by_name(NULL, name);
+	if (!device_node) {
+		CAM_ERR(CAM_FD, "Cannot find node in dtsi %s", name);
+		return -ENODEV;
+	}
+
+	child_pdev = of_find_device_by_node(device_node);
+	if (!child_pdev) {
+		CAM_ERR(CAM_FD, "Failed to find device on bus %s",
+			device_node->name);
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	hw_intf = (struct cam_hw_intf *)platform_get_drvdata(child_pdev);
+	if (!hw_intf) {
+		CAM_ERR(CAM_FD, "No driver data for child device");
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	CAM_DBG(CAM_FD, "child type %d index %d child_intf %pK",
+		hw_intf->hw_type, hw_intf->hw_idx, hw_intf);
+
+	if (hw_intf->hw_idx >= CAM_FD_HW_MAX) {
+		CAM_ERR(CAM_FD, "hw_idx invalid %d", hw_intf->hw_idx);
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	rc = 0;
+	*device_hw_intf = hw_intf;
+
+put_node:
+	of_node_put(device_node);
+
+	return rc;
+}
+
+/*
+ * Generic cmd-buffer blob callback (invoked per blob by
+ * cam_packet_util_process_generic_cmd_buffer). @user_data is the
+ * prestart args being filled for this request.
+ *
+ * RAW_RESULTS_REQUIRED: copies the uint32 flag into prestart args.
+ * SOC_CLOCK_BW_REQUEST: size-checked and logged only — the clock/bw
+ * values are not applied here. Unknown blob types warn but return 0.
+ */
+static int cam_fd_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	struct cam_fd_hw_cmd_prestart_args *prestart_args =
+		(struct cam_fd_hw_cmd_prestart_args *)user_data;
+
+	if (!blob_data || (blob_size == 0)) {
+		CAM_ERR(CAM_FD, "Invalid blob info %pK %u", blob_data,
+			blob_size);
+		return -EINVAL;
+	}
+
+	if (!prestart_args) {
+		CAM_ERR(CAM_FD, "Invalid user data");
+		return -EINVAL;
+	}
+
+	switch (blob_type) {
+	case CAM_FD_BLOB_TYPE_RAW_RESULTS_REQUIRED: {
+		uint32_t *get_raw_results = (uint32_t *)blob_data;
+
+		if (sizeof(uint32_t) != blob_size) {
+			CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
+				sizeof(uint32_t), blob_size);
+			return -EINVAL;
+		}
+
+		prestart_args->get_raw_results = *get_raw_results;
+		break;
+	}
+	case CAM_FD_BLOB_TYPE_SOC_CLOCK_BW_REQUEST: {
+		struct cam_fd_soc_clock_bw_request *clk_req =
+			(struct cam_fd_soc_clock_bw_request *)blob_data;
+
+		if (sizeof(struct cam_fd_soc_clock_bw_request) != blob_size) {
+			CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
+				sizeof(struct cam_fd_soc_clock_bw_request),
+				blob_size);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_FD, "SOC Clk Request clock=%lld, bw=%lld",
+			clk_req->clock_rate, clk_req->bandwidth);
+
+		break;
+	}
+	default:
+		CAM_WARN(CAM_FD, "Unknown blob type %d", blob_type);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Find and process the generic (non-CDM) cmd buffer in @packet, feeding
+ * its blobs to cam_fd_packet_generic_blob_handler with @prestart_args.
+ * Only the FIRST non-empty, non-CDM descriptor is processed — the
+ * unconditional break at the end of the loop body enforces that.
+ */
+static int cam_fd_mgr_util_parse_generic_cmd_buffer(
+	struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_packet *packet,
+	struct cam_fd_hw_cmd_prestart_args *prestart_args)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc = 0;
+
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if (cmd_desc[i].meta_data == CAM_FD_CMD_BUFFER_ID_CDM)
+			continue;
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc)
+			return rc;
+
+		rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
+			cam_fd_packet_generic_blob_handler, prestart_args);
+		if (rc)
+			CAM_ERR(CAM_FD, "Failed in processing blobs %d", rc);
+
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Map a (direction, resource_type) pair to its mapping requirements:
+ * whether the buffer needs an IOMMU (io) mapping and/or a kernel CPU
+ * mapping. Unknown ports or directions are warned and rejected with
+ * -EINVAL, leaving the output flags untouched.
+ */
+static int cam_fd_mgr_util_get_buf_map_requirement(uint32_t direction,
+	uint32_t resource_type, bool *need_io_map, bool *need_cpu_map)
+{
+	if (!need_io_map || !need_cpu_map) {
+		CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK", need_io_map,
+			need_cpu_map);
+		return -EINVAL;
+	}
+
+	if (direction == CAM_BUF_INPUT) {
+		if (resource_type != CAM_FD_INPUT_PORT_ID_IMAGE) {
+			CAM_WARN(CAM_FD, "Invalid port: dir %d, id %d",
+				direction, resource_type);
+			return -EINVAL;
+		}
+		/* Input image: HW reads it via IOMMU; no CPU access needed */
+		*need_io_map = true;
+		*need_cpu_map = false;
+		return 0;
+	}
+
+	if (direction != CAM_BUF_OUTPUT) {
+		CAM_WARN(CAM_FD, "Invalid direction %d", direction);
+		return -EINVAL;
+	}
+
+	switch (resource_type) {
+	case CAM_FD_OUTPUT_PORT_ID_RESULTS:
+	case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS:
+		/* Results are both HW-written and CPU post-processed */
+		*need_io_map = true;
+		*need_cpu_map = true;
+		return 0;
+	case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER:
+		/* Scratch space for HW only */
+		*need_io_map = true;
+		*need_cpu_map = false;
+		return 0;
+	default:
+		CAM_WARN(CAM_FD, "Invalid port: dir %d, id %d",
+			direction, resource_type);
+		return -EINVAL;
+	}
+}
+
+/*
+ * Walk the packet's io configs and build the HW-layer input/output buffer
+ * arrays plus the prepare->in/out_map_entries (resource handle + fence)
+ * used for sync signalling. For each config, every valid plane is mapped
+ * to an IOMMU address and/or a kernel CPU address according to
+ * cam_fd_mgr_util_get_buf_map_requirement(); configs that helper rejects
+ * are warned and skipped, not fatal. @io_buf_size bounds both the
+ * input_buf and output_buf arrays.
+ */
+static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_fd_hw_io_buffer *input_buf,
+	struct cam_fd_hw_io_buffer *output_buf, uint32_t io_buf_size)
+{
+	int rc = -EINVAL;
+	uint32_t i, j, plane, num_out_buf, num_in_buf;
+	struct cam_buf_io_cfg *io_cfg;
+	dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
+	uintptr_t cpu_addr[CAM_PACKET_MAX_PLANES];
+	size_t size;
+	bool need_io_map, need_cpu_map;
+
+	/* Get IO Buf information */
+	num_out_buf = 0;
+	num_in_buf = 0;
+	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+		&prepare->packet->payload + prepare->packet->io_configs_offset);
+
+	for (i = 0; i < prepare->packet->num_io_configs; i++) {
+		CAM_DBG(CAM_FD,
+			"IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]",
+			i, io_cfg[i].mem_handle[0], io_cfg[i].direction,
+			io_cfg[i].resource_type,
+			io_cfg[i].fence, io_cfg[i].format);
+
+		if ((num_in_buf >= io_buf_size) ||
+			(num_out_buf >= io_buf_size)) {
+			CAM_ERR(CAM_FD, "Invalid number of buffers %d %d %d",
+				num_in_buf, num_out_buf, io_buf_size);
+			return -EINVAL;
+		}
+
+		rc = cam_fd_mgr_util_get_buf_map_requirement(
+			io_cfg[i].direction, io_cfg[i].resource_type,
+			&need_io_map, &need_cpu_map);
+		if (rc) {
+			CAM_WARN(CAM_FD, "Invalid io buff [%d] : %d %d %d",
+				i, io_cfg[i].direction,
+				io_cfg[i].resource_type, rc);
+			continue;
+		}
+
+		memset(io_addr, 0x0, sizeof(io_addr));
+		/* plane loop stops at the first empty mem_handle slot */
+		for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
+			if (!io_cfg[i].mem_handle[plane])
+				break;
+
+			io_addr[plane] = 0x0;
+			cpu_addr[plane] = 0x0;
+
+			if (need_io_map) {
+				rc = cam_mem_get_io_buf(
+					io_cfg[i].mem_handle[plane],
+					iommu_hdl, &io_addr[plane], &size);
+				if (rc) {
+					CAM_ERR(CAM_FD,
+						"Invalid io buf %d %d %d %d",
+						io_cfg[i].direction,
+						io_cfg[i].resource_type, plane,
+						rc);
+					return -ENOMEM;
+				}
+
+				io_addr[plane] += io_cfg[i].offsets[plane];
+			}
+
+			if (need_cpu_map) {
+				rc = cam_mem_get_cpu_buf(
+					io_cfg[i].mem_handle[plane],
+					&cpu_addr[plane], &size);
+				/*
+				 * NOTE(review): besides rc, this also fails
+				 * if io_addr doesn't fit in 32 bits —
+				 * presumably a HW addressing constraint for
+				 * CPU-visible result buffers; confirm.
+				 */
+				if (rc || ((io_addr[plane] & 0xFFFFFFFF)
+					!= io_addr[plane])) {
+					CAM_ERR(CAM_FD,
+						"Invalid cpu buf %d %d %d %d",
+						io_cfg[i].direction,
+						io_cfg[i].resource_type, plane,
+						rc);
+					return rc;
+				}
+
+				cpu_addr[plane] += io_cfg[i].offsets[plane];
+			}
+
+			CAM_DBG(CAM_FD, "IO Address[%d][%d] : %pK, %pK",
+				io_cfg[i].direction, plane, io_addr[plane],
+				cpu_addr[plane]);
+		}
+
+		switch (io_cfg[i].direction) {
+		case CAM_BUF_INPUT: {
+			prepare->in_map_entries[num_in_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->in_map_entries[num_in_buf].sync_id =
+				io_cfg[i].fence;
+
+			input_buf[num_in_buf].valid = true;
+			for (j = 0; j < plane; j++) {
+				input_buf[num_in_buf].io_addr[j] = io_addr[j];
+				input_buf[num_in_buf].cpu_addr[j] = cpu_addr[j];
+			}
+			input_buf[num_in_buf].num_buf = plane;
+			input_buf[num_in_buf].io_cfg = &io_cfg[i];
+
+			num_in_buf++;
+			break;
+		}
+		case CAM_BUF_OUTPUT: {
+			prepare->out_map_entries[num_out_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->out_map_entries[num_out_buf].sync_id =
+				io_cfg[i].fence;
+
+			output_buf[num_out_buf].valid = true;
+			for (j = 0; j < plane; j++) {
+				output_buf[num_out_buf].io_addr[j] = io_addr[j];
+				output_buf[num_out_buf].cpu_addr[j] =
+					cpu_addr[j];
+			}
+			output_buf[num_out_buf].num_buf = plane;
+			output_buf[num_out_buf].io_cfg = &io_cfg[i];
+
+			num_out_buf++;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FD, "Unsupported io direction %d",
+				io_cfg[i].direction);
+			return -EINVAL;
+		}
+	}
+
+	prepare->num_in_map_entries = num_in_buf;
+	prepare->num_out_map_entries = num_out_buf;
+
+	return 0;
+}
+
/*
 * cam_fd_mgr_util_prepare_hw_update_entries() - build the HW update entry
 * list for one FD request.
 *
 * Final layout of prepare->hw_update_entries:
 *   [0]      KMD pre-config commands (if the HW layer emitted any)
 *   [1..k]   CDM command buffers taken from the packet
 *   [last]   KMD post-config commands (if any)
 *
 * The HW layer writes its pre/post-start commands into the unused tail of
 * the KMD buffer via CAM_FD_HW_CMD_PRESTART; this function then records
 * those regions (plus the packet's CDM buffers) as update entries.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int cam_fd_mgr_util_prepare_hw_update_entries(
	struct cam_fd_hw_mgr *hw_mgr,
	struct cam_hw_prepare_update_args *prepare,
	struct cam_fd_hw_cmd_prestart_args *prestart_args,
	struct cam_kmd_buf_info *kmd_buf_info)
{
	int i, rc;
	struct cam_hw_update_entry *hw_entry;
	uint32_t num_ent;
	struct cam_fd_hw_mgr_ctx *hw_ctx =
		(struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map;
	struct cam_fd_device *hw_device;
	uint32_t kmd_buf_max_size, kmd_buf_used_bytes = 0;
	uint32_t *kmd_buf_addr;
	struct cam_cmd_buf_desc *cmd_desc = NULL;

	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		return rc;
	}

	/* Hand the HW layer the unused tail of the KMD buffer. */
	kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr +
		kmd_buf_info->used_bytes);
	kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes;

	prestart_args->cmd_buf_addr = kmd_buf_addr;
	prestart_args->size = kmd_buf_max_size;
	prestart_args->pre_config_buf_size = 0;
	prestart_args->post_config_buf_size = 0;

	/* Ask the HW layer to emit its pre/post-start command streams. */
	if (hw_device->hw_intf->hw_ops.process_cmd) {
		rc = hw_device->hw_intf->hw_ops.process_cmd(
			hw_device->hw_intf->hw_priv, CAM_FD_HW_CMD_PRESTART,
			prestart_args,
			sizeof(struct cam_fd_hw_cmd_prestart_args));
		if (rc) {
			CAM_ERR(CAM_FD, "Failed in CMD_PRESTART %d", rc);
			return rc;
		}
	}

	kmd_buf_used_bytes += prestart_args->pre_config_buf_size;
	kmd_buf_used_bytes += prestart_args->post_config_buf_size;

	/* HW layer is expected to add commands */
	if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) {
		CAM_ERR(CAM_FD, "Invalid kmd used bytes %d (%d)",
			kmd_buf_used_bytes, kmd_buf_max_size);
		return -ENOMEM;
	}

	hw_entry = prepare->hw_update_entries;
	num_ent = 0;

	if (prestart_args->pre_config_buf_size) {
		/*
		 * NOTE(review): this check also rejects when exactly one
		 * slot remains ((num_ent + 1) == max); confirm the one-entry
		 * headroom is intentional.
		 */
		if ((num_ent + 1) >= prepare->max_hw_update_entries) {
			CAM_ERR(CAM_FD, "Insufficient HW entries :%d %d",
				num_ent, prepare->max_hw_update_entries);
			return -EINVAL;
		}

		hw_entry[num_ent].handle = kmd_buf_info->handle;
		hw_entry[num_ent].len = prestart_args->pre_config_buf_size;
		hw_entry[num_ent].offset = kmd_buf_info->offset;

		/* Consume the pre-config region from the KMD buffer. */
		kmd_buf_info->used_bytes += prestart_args->pre_config_buf_size;
		kmd_buf_info->offset += prestart_args->pre_config_buf_size;
		num_ent++;
	}

	/*
	 * set the cmd_desc to point the first command descriptor in the
	 * packet and update hw entries with CDM command buffers
	 */
	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)
		&prepare->packet->payload + prepare->packet->cmd_buf_offset);

	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
		if (!cmd_desc[i].length)
			continue;

		/* Only CDM-tagged command buffers become HW entries. */
		if (cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)
			continue;

		if (num_ent + 1 >= prepare->max_hw_update_entries) {
			CAM_ERR(CAM_FD, "Insufficient HW entries :%d %d",
				num_ent, prepare->max_hw_update_entries);
			return -EINVAL;
		}

		hw_entry[num_ent].handle = cmd_desc[i].mem_handle;
		hw_entry[num_ent].len = cmd_desc[i].length;
		hw_entry[num_ent].offset = cmd_desc[i].offset;
		num_ent++;
	}

	if (prestart_args->post_config_buf_size) {
		if (num_ent + 1 >= prepare->max_hw_update_entries) {
			CAM_ERR(CAM_FD, "Insufficient HW entries :%d %d",
				num_ent, prepare->max_hw_update_entries);
			return -EINVAL;
		}

		hw_entry[num_ent].handle = kmd_buf_info->handle;
		hw_entry[num_ent].len = prestart_args->post_config_buf_size;
		hw_entry[num_ent].offset = kmd_buf_info->offset;

		/* Consume the post-config region from the KMD buffer. */
		kmd_buf_info->used_bytes += prestart_args->post_config_buf_size;
		kmd_buf_info->offset += prestart_args->post_config_buf_size;

		num_ent++;
	}

	prepare->num_hw_update_entries = num_ent;

	CAM_DBG(CAM_FD, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)",
		prepare->num_hw_update_entries, prepare->num_in_map_entries,
		prepare->num_out_map_entries);

	return rc;
}
+
/*
 * cam_fd_mgr_util_submit_frame() - work-queue callback that picks the next
 * pending frame (high priority list first) and starts it on the FD HW.
 *
 * @priv: struct cam_fd_hw_mgr pointer
 * @data: unused
 *
 * Lock order: frame_req_mutex is taken first, then hw_device->lock. The
 * request is unlinked from its pending list only after the device is
 * confirmed free, so an -EBUSY return leaves the request queued for a
 * later invocation.
 *
 * Returns 0 when a frame was submitted or none were pending, -EBUSY when
 * the device is busy, other negative errno on failure.
 */
static int cam_fd_mgr_util_submit_frame(void *priv, void *data)
{
	struct cam_fd_device *hw_device;
	struct cam_fd_hw_mgr *hw_mgr;
	struct cam_fd_mgr_frame_request *frame_req;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	struct cam_fd_hw_cmd_start_args start_args;
	int rc;

	if (!priv) {
		CAM_ERR(CAM_FD, "Invalid data");
		return -EINVAL;
	}

	hw_mgr = (struct cam_fd_hw_mgr *)priv;
	mutex_lock(&hw_mgr->frame_req_mutex);

	/* Check if we have any frames pending in high priority list */
	if (!list_empty(&hw_mgr->frame_pending_list_high)) {
		CAM_DBG(CAM_FD, "Pending frames in high priority list");
		frame_req = list_first_entry(&hw_mgr->frame_pending_list_high,
			struct cam_fd_mgr_frame_request, list);
	} else if (!list_empty(&hw_mgr->frame_pending_list_normal)) {
		CAM_DBG(CAM_FD, "Pending frames in normal priority list");
		frame_req = list_first_entry(&hw_mgr->frame_pending_list_normal,
			struct cam_fd_mgr_frame_request, list);
	} else {
		mutex_unlock(&hw_mgr->frame_req_mutex);
		CAM_DBG(CAM_FD, "No pending frames");
		return 0;
	}

	CAM_DBG(CAM_FD, "FrameSubmit : Frame[%lld]", frame_req->request_id);
	hw_ctx = frame_req->hw_ctx;
	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		mutex_unlock(&hw_mgr->frame_req_mutex);
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		return rc;
	}

	/* Device busy: leave the request on its pending list and retry later. */
	mutex_lock(&hw_device->lock);
	if (hw_device->ready_to_process == false) {
		mutex_unlock(&hw_device->lock);
		mutex_unlock(&hw_mgr->frame_req_mutex);
		return -EBUSY;
	}

	trace_cam_submit_to_hw("FD", frame_req->request_id);

	/* Device is ours: unlink from the pending list before starting HW. */
	list_del_init(&frame_req->list);
	mutex_unlock(&hw_mgr->frame_req_mutex);

	if (hw_device->hw_intf->hw_ops.start) {
		start_args.hw_ctx = hw_ctx;
		start_args.ctx_hw_private = hw_ctx->ctx_hw_private;
		start_args.hw_req_private = &frame_req->hw_req_private;
		start_args.hw_update_entries = frame_req->hw_update_entries;
		start_args.num_hw_update_entries =
			frame_req->num_hw_update_entries;

		rc = hw_device->hw_intf->hw_ops.start(
			hw_device->hw_intf->hw_priv, &start_args,
			sizeof(start_args));
		if (rc) {
			CAM_ERR(CAM_FD, "Failed in HW Start %d", rc);
			mutex_unlock(&hw_device->lock);
			goto put_req_into_free_list;
		}
	} else {
		CAM_ERR(CAM_FD, "Invalid hw_ops.start");
		mutex_unlock(&hw_device->lock);
		rc = -EPERM;
		goto put_req_into_free_list;
	}

	/* Mark the device busy and record the in-flight request. */
	hw_device->ready_to_process = false;
	hw_device->cur_hw_ctx = hw_ctx;
	hw_device->req_id = frame_req->request_id;
	mutex_unlock(&hw_device->lock);

	rc = cam_fd_mgr_util_put_frame_req(
		&hw_mgr->frame_processing_list, &frame_req);
	if (rc) {
		CAM_ERR(CAM_FD,
			"Failed in putting frame req in processing list");
		goto stop_unlock;
	}

	return rc;

	/* Could not track the in-flight request: stop the HW just started. */
stop_unlock:
	if (hw_device->hw_intf->hw_ops.stop) {
		struct cam_fd_hw_stop_args stop_args;

		stop_args.hw_ctx = hw_ctx;
		stop_args.ctx_hw_private = hw_ctx->ctx_hw_private;
		stop_args.hw_req_private = &frame_req->hw_req_private;
		if (hw_device->hw_intf->hw_ops.stop(
			hw_device->hw_intf->hw_priv, &stop_args,
			sizeof(stop_args)))
			/*
			 * NOTE(review): rc here is the earlier list error,
			 * not the stop() result.
			 */
			CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
	}
put_req_into_free_list:
	cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, &frame_req);

	return rc;
}
+
+static int cam_fd_mgr_util_schedule_frame_worker_task(
+ struct cam_fd_hw_mgr *hw_mgr)
+{
+ int32_t rc = 0;
+ struct crm_workq_task *task;
+ struct cam_fd_mgr_work_data *work_data;
+
+ task = cam_req_mgr_workq_get_task(hw_mgr->work);
+ if (!task) {
+ CAM_ERR(CAM_FD, "no empty task available");
+ return -ENOMEM;
+ }
+
+ work_data = (struct cam_fd_mgr_work_data *)task->payload;
+ work_data->type = CAM_FD_WORK_FRAME;
+
+ task->process_cb = cam_fd_mgr_util_submit_frame;
+ rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+
+ return rc;
+}
+
+static int32_t cam_fd_mgr_workq_irq_cb(void *priv, void *data)
+{
+ struct cam_fd_device *hw_device = NULL;
+ struct cam_fd_hw_mgr *hw_mgr;
+ struct cam_fd_mgr_work_data *work_data;
+ struct cam_fd_mgr_frame_request *frame_req = NULL;
+ enum cam_fd_hw_irq_type irq_type;
+ bool frame_abort = true;
+ int rc;
+
+ if (!data || !priv) {
+ CAM_ERR(CAM_FD, "Invalid data %pK %pK", data, priv);
+ return -EINVAL;
+ }
+
+ hw_mgr = (struct cam_fd_hw_mgr *)priv;
+ work_data = (struct cam_fd_mgr_work_data *)data;
+ irq_type = work_data->irq_type;
+
+ CAM_DBG(CAM_FD, "FD IRQ type=%d", irq_type);
+
+ if (irq_type == CAM_FD_IRQ_HALT_DONE) {
+ /* HALT would be followed by a RESET, ignore this */
+ CAM_DBG(CAM_FD, "HALT IRQ callback");
+ return 0;
+ }
+
+ /* Get the frame from processing list */
+ rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_processing_list,
+ &frame_req);
+ if (rc || !frame_req) {
+ /*
+ * This can happen if reset is triggered while no frames
+ * were pending, so not an error, just continue to check if
+ * there are any pending frames and submit
+ */
+ CAM_DBG(CAM_FD, "No Frame in processing list, rc=%d", rc);
+ goto submit_next_frame;
+ }
+
+ if (!frame_req->hw_ctx) {
+ CAM_ERR(CAM_FD, "Invalid Frame request %lld",
+ frame_req->request_id);
+ goto put_req_in_free_list;
+ }
+
+ rc = cam_fd_mgr_util_get_device(hw_mgr, frame_req->hw_ctx, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+ goto put_req_in_free_list;
+ }
+
+ /* Read frame results first */
+ if (irq_type == CAM_FD_IRQ_FRAME_DONE) {
+ struct cam_fd_hw_frame_done_args frame_done_args;
+
+ CAM_DBG(CAM_FD, "FrameDone : Frame[%lld]",
+ frame_req->request_id);
+
+ frame_done_args.hw_ctx = frame_req->hw_ctx;
+ frame_done_args.ctx_hw_private =
+ frame_req->hw_ctx->ctx_hw_private;
+ frame_done_args.request_id = frame_req->request_id;
+ frame_done_args.hw_req_private = &frame_req->hw_req_private;
+
+ if (hw_device->hw_intf->hw_ops.process_cmd) {
+ rc = hw_device->hw_intf->hw_ops.process_cmd(
+ hw_device->hw_intf->hw_priv,
+ CAM_FD_HW_CMD_FRAME_DONE,
+ &frame_done_args, sizeof(frame_done_args));
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in CMD_PRESTART %d",
+ rc);
+ frame_abort = true;
+ goto notify_context;
+ }
+ }
+
+ frame_abort = false;
+ }
+
+ trace_cam_irq_handled("FD", irq_type);
+
+notify_context:
+ /* Do a callback to inform frame done or stop done */
+ if (frame_req->hw_ctx->event_cb) {
+ struct cam_hw_done_event_data buf_data;
+
+ CAM_DBG(CAM_FD, "FrameHALT : Frame[%lld]",
+ frame_req->request_id);
+
+ buf_data.num_handles = frame_req->num_hw_update_entries;
+ buf_data.request_id = frame_req->request_id;
+
+ rc = frame_req->hw_ctx->event_cb(frame_req->hw_ctx->cb_priv,
+ frame_abort, &buf_data);
+ if (rc)
+ CAM_ERR(CAM_FD, "Error in event cb handling %d", rc);
+ }
+
+ /*
+ * Now we can set hw device is free to process further frames.
+ * Note - Do not change state to IDLE until we read the frame results,
+ * Otherwise, other thread may schedule frame processing before
+ * reading current frame's results. Also, we need to set to IDLE state
+ * in case some error happens after getting this irq callback
+ */
+ mutex_lock(&hw_device->lock);
+ hw_device->ready_to_process = true;
+ hw_device->req_id = -1;
+ hw_device->cur_hw_ctx = NULL;
+ CAM_DBG(CAM_FD, "ready_to_process=%d", hw_device->ready_to_process);
+ mutex_unlock(&hw_device->lock);
+
+put_req_in_free_list:
+ rc = cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+ &frame_req);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in putting frame req in free list");
+ /* continue */
+ }
+
+submit_next_frame:
+ /* Check if there are any frames pending for processing and submit */
+ rc = cam_fd_mgr_util_submit_frame(hw_mgr, NULL);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Error while submit frame, rc=%d", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int cam_fd_mgr_irq_cb(void *data, enum cam_fd_hw_irq_type irq_type)
+{
+ struct cam_fd_hw_mgr *hw_mgr = &g_fd_hw_mgr;
+ int rc = 0;
+ unsigned long flags;
+ struct crm_workq_task *task;
+ struct cam_fd_mgr_work_data *work_data;
+
+ spin_lock_irqsave(&hw_mgr->hw_mgr_slock, flags);
+ task = cam_req_mgr_workq_get_task(hw_mgr->work);
+ if (!task) {
+ CAM_ERR(CAM_FD, "no empty task available");
+ spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags);
+ return -ENOMEM;
+ }
+
+ work_data = (struct cam_fd_mgr_work_data *)task->payload;
+ work_data->type = CAM_FD_WORK_IRQ;
+ work_data->irq_type = irq_type;
+
+ task->process_cb = cam_fd_mgr_workq_irq_cb;
+ rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+ if (rc)
+ CAM_ERR(CAM_FD, "Failed in enqueue work task, rc=%d", rc);
+
+ spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags);
+
+ return rc;
+}
+
+static int cam_fd_mgr_hw_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
+{
+ int rc = 0;
+ struct cam_fd_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_query_cap_cmd *query = hw_get_caps_args;
+ struct cam_fd_query_cap_cmd query_fd;
+ void __user *caps_handle =
+ u64_to_user_ptr(query->caps_handle);
+
+ if (copy_from_user(&query_fd, caps_handle,
+ sizeof(struct cam_fd_query_cap_cmd))) {
+ CAM_ERR(CAM_FD, "Failed in copy from user, rc=%d", rc);
+ return -EFAULT;
+ }
+
+ query_fd = hw_mgr->fd_caps;
+
+ CAM_DBG(CAM_FD,
+ "IOMMU device(%d, %d), CDM(%d, %d), versions %d.%d, %d.%d",
+ query_fd.device_iommu.secure, query_fd.device_iommu.non_secure,
+ query_fd.cdm_iommu.secure, query_fd.cdm_iommu.non_secure,
+ query_fd.hw_caps.core_version.major,
+ query_fd.hw_caps.core_version.minor,
+ query_fd.hw_caps.wrapper_version.major,
+ query_fd.hw_caps.wrapper_version.minor);
+
+ if (copy_to_user(caps_handle, &query_fd,
+ sizeof(struct cam_fd_query_cap_cmd)))
+ rc = -EFAULT;
+
+ return rc;
+}
+
+static int cam_fd_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
+{
+ struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+ struct cam_hw_acquire_args *acquire_args =
+ (struct cam_hw_acquire_args *)hw_acquire_args;
+ struct cam_fd_hw_mgr_ctx *hw_ctx;
+ struct cam_fd_acquire_dev_info fd_acquire_args;
+ int rc;
+
+ if (!acquire_args || acquire_args->num_acq <= 0) {
+ CAM_ERR(CAM_FD, "Invalid acquire args %pK", acquire_args);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&fd_acquire_args,
+ (void __user *)acquire_args->acquire_info,
+ sizeof(struct cam_fd_acquire_dev_info))) {
+ CAM_ERR(CAM_FD, "Copy from user failed");
+ return -EFAULT;
+ }
+
+ CAM_DBG(CAM_FD, "Acquire : mode=%d, get_raw_results=%d, priority=%d",
+ fd_acquire_args.mode, fd_acquire_args.get_raw_results,
+ fd_acquire_args.priority);
+
+ /* get a free fd hw mgr ctx */
+ rc = cam_fd_mgr_util_get_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+ if (rc || !hw_ctx) {
+ CAM_ERR(CAM_FD, "Get hw context failed, rc=%d, hw_ctx=%pK",
+ rc, hw_ctx);
+ return -EINVAL;
+ }
+
+ if (fd_acquire_args.get_raw_results && !hw_mgr->raw_results_available) {
+ CAM_ERR(CAM_FD, "HW cannot support raw results %d (%d)",
+ fd_acquire_args.get_raw_results,
+ hw_mgr->raw_results_available);
+ goto put_ctx;
+ }
+
+ if (!(fd_acquire_args.mode & hw_mgr->supported_modes)) {
+ CAM_ERR(CAM_FD, "HW cannot support requested mode 0x%x (0x%x)",
+ fd_acquire_args.mode, hw_mgr->supported_modes);
+ rc = -EPERM;
+ goto put_ctx;
+ }
+
+ rc = cam_fd_mgr_util_select_device(hw_mgr, hw_ctx, &fd_acquire_args);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in selecting device, rc=%d", rc);
+ goto put_ctx;
+ }
+
+ hw_ctx->ctx_in_use = true;
+ hw_ctx->hw_mgr = hw_mgr;
+ hw_ctx->get_raw_results = fd_acquire_args.get_raw_results;
+ hw_ctx->mode = fd_acquire_args.mode;
+
+ /* Save incoming cam core info into hw ctx*/
+ hw_ctx->cb_priv = acquire_args->context_data;
+ hw_ctx->event_cb = acquire_args->event_cb;
+
+ /* Update out args */
+ acquire_args->ctxt_to_hw_map = hw_ctx;
+
+ cam_fd_mgr_util_put_ctx(&hw_mgr->used_ctx_list, &hw_ctx);
+
+ return 0;
+put_ctx:
+ list_del_init(&hw_ctx->list);
+ cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+ return rc;
+}
+
+static int cam_fd_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
+{
+ struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+ struct cam_hw_release_args *release_args = hw_release_args;
+ struct cam_fd_hw_mgr_ctx *hw_ctx;
+ int rc;
+
+ if (!hw_mgr_priv || !hw_release_args) {
+ CAM_ERR(CAM_FD, "Invalid arguments %pK, %pK",
+ hw_mgr_priv, hw_release_args);
+ return -EINVAL;
+ }
+
+ hw_ctx = (struct cam_fd_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
+ if (!hw_ctx || !hw_ctx->ctx_in_use) {
+ CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+ return -EPERM;
+ }
+
+ rc = cam_fd_mgr_util_release_device(hw_mgr, hw_ctx);
+ if (rc)
+ CAM_ERR(CAM_FD, "Failed in release device, rc=%d", rc);
+
+ hw_ctx->ctx_in_use = false;
+ list_del_init(&hw_ctx->list);
+ cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+
+ return 0;
+}
+
+static int cam_fd_mgr_hw_start(void *hw_mgr_priv, void *mgr_start_args)
+{
+ int rc = 0;
+ struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+ struct cam_hw_start_args *hw_mgr_start_args =
+ (struct cam_hw_start_args *)mgr_start_args;
+ struct cam_fd_hw_mgr_ctx *hw_ctx;
+ struct cam_fd_device *hw_device;
+ struct cam_fd_hw_init_args hw_init_args;
+
+ if (!hw_mgr_priv || !hw_mgr_start_args) {
+ CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+ hw_mgr_priv, hw_mgr_start_args);
+ return -EINVAL;
+ }
+
+ hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_start_args->ctxt_to_hw_map;
+ if (!hw_ctx || !hw_ctx->ctx_in_use) {
+ CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+ return -EPERM;
+ }
+
+ CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
+ hw_ctx->device_index);
+
+ rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+ return rc;
+ }
+
+ if (hw_device->hw_intf->hw_ops.init) {
+ hw_init_args.hw_ctx = hw_ctx;
+ hw_init_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+ rc = hw_device->hw_intf->hw_ops.init(
+ hw_device->hw_intf->hw_priv, &hw_init_args,
+ sizeof(hw_init_args));
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in HW Init %d", rc);
+ return rc;
+ }
+ } else {
+ CAM_ERR(CAM_FD, "Invalid init function");
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
/*
 * cam_fd_mgr_hw_flush_req() - flush specific requests belonging to one
 * context.
 *
 * Each active request is searched for (by request_id) in the high/normal
 * pending lists and the processing list and unlinked when found. A match
 * on the processing list additionally stops the HW, but only when this
 * context currently owns the device. Pending requests are simply moved
 * to the free list.
 *
 * Lock order: frame_req_mutex first, then hw_device->lock.
 */
static int cam_fd_mgr_hw_flush_req(void *hw_mgr_priv,
	struct cam_hw_flush_args *flush_args)
{
	int rc = 0;
	struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req;
	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
	struct cam_fd_device *hw_device;
	struct cam_fd_hw_stop_args hw_stop_args;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	uint32_t i = 0;

	hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map;

	if (!hw_ctx || !hw_ctx->ctx_in_use) {
		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
		return -EPERM;
	}
	CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
		hw_ctx->device_index);

	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		return rc;
	}

	mutex_lock(&hw_mgr->frame_req_mutex);
	for (i = 0; i < flush_args->num_req_active; i++) {
		flush_req = (struct cam_fd_mgr_frame_request *)
			flush_args->flush_req_active[i];

		/* High-priority pending list: just unlink a match. */
		list_for_each_entry_safe(frame_req, req_temp,
			&hw_mgr->frame_pending_list_high, list) {
			if (frame_req->hw_ctx != hw_ctx)
				continue;

			if (frame_req->request_id != flush_req->request_id)
				continue;

			list_del_init(&frame_req->list);
			break;
		}

		/* Normal-priority pending list: same treatment. */
		list_for_each_entry_safe(frame_req, req_temp,
			&hw_mgr->frame_pending_list_normal, list) {
			if (frame_req->hw_ctx != hw_ctx)
				continue;

			if (frame_req->request_id != flush_req->request_id)
				continue;

			list_del_init(&frame_req->list);
			break;
		}

		/* Processing list: also stop HW if this ctx owns the device. */
		list_for_each_entry_safe(frame_req, req_temp,
			&hw_mgr->frame_processing_list, list) {
			if (frame_req->hw_ctx != hw_ctx)
				continue;

			if (frame_req->request_id != flush_req->request_id)
				continue;

			list_del_init(&frame_req->list);

			mutex_lock(&hw_device->lock);
			/* Device idle or owned by another ctx: nothing to stop. */
			if ((hw_device->ready_to_process == true) ||
				(hw_device->cur_hw_ctx != hw_ctx))
				goto unlock_dev_flush_req;

			if (hw_device->hw_intf->hw_ops.stop) {
				/*
				 * NOTE(review): only hw_ctx is filled in here,
				 * unlike the submit path's stop which also
				 * sets ctx_hw_private/hw_req_private; confirm
				 * the stop handler tolerates the unset fields.
				 */
				hw_stop_args.hw_ctx = hw_ctx;
				rc = hw_device->hw_intf->hw_ops.stop(
					hw_device->hw_intf->hw_priv,
					&hw_stop_args,
					sizeof(hw_stop_args));
				if (rc) {
					CAM_ERR(CAM_FD,
						"Failed in HW Stop %d", rc);
					goto unlock_dev_flush_req;
				}
				hw_device->ready_to_process = true;
			}

unlock_dev_flush_req:
			mutex_unlock(&hw_device->lock);
			break;
		}
	}
	mutex_unlock(&hw_mgr->frame_req_mutex);

	/* Pending (not yet configured) requests go straight to the free list. */
	for (i = 0; i < flush_args->num_req_pending; i++) {
		flush_req = (struct cam_fd_mgr_frame_request *)
			flush_args->flush_req_pending[i];
		cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
			&flush_req);
	}

	return rc;
}
+
/*
 * cam_fd_mgr_hw_flush_ctx() - flush every request belonging to one context.
 *
 * Unlinks all of the context's requests from the high/normal pending lists
 * and the processing list. For processing-list entries the HW is stopped,
 * but only when this context currently owns the device. Pending requests
 * from flush_args are returned to the free list.
 *
 * Lock order: frame_req_mutex first, then hw_device->lock.
 */
static int cam_fd_mgr_hw_flush_ctx(void *hw_mgr_priv,
	struct cam_hw_flush_args *flush_args)
{
	int rc = 0;
	struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req;
	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
	struct cam_fd_device *hw_device;
	struct cam_fd_hw_stop_args hw_stop_args;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	uint32_t i = 0;

	hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map;

	if (!hw_ctx || !hw_ctx->ctx_in_use) {
		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
		return -EPERM;
	}
	CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
		hw_ctx->device_index);

	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		return rc;
	}

	mutex_lock(&hw_mgr->frame_req_mutex);
	/* Drop all of this ctx's requests from both pending lists. */
	list_for_each_entry_safe(frame_req, req_temp,
		&hw_mgr->frame_pending_list_high, list) {
		if (frame_req->hw_ctx != hw_ctx)
			continue;

		list_del_init(&frame_req->list);
	}

	list_for_each_entry_safe(frame_req, req_temp,
		&hw_mgr->frame_pending_list_normal, list) {
		if (frame_req->hw_ctx != hw_ctx)
			continue;

		list_del_init(&frame_req->list);
	}

	/* Processing list: also stop the HW if this ctx owns the device. */
	list_for_each_entry_safe(frame_req, req_temp,
		&hw_mgr->frame_processing_list, list) {
		if (frame_req->hw_ctx != hw_ctx)
			continue;

		list_del_init(&frame_req->list);
		mutex_lock(&hw_device->lock);
		/* Device idle or owned by another ctx: nothing to stop. */
		if ((hw_device->ready_to_process == true) ||
			(hw_device->cur_hw_ctx != hw_ctx))
			goto unlock_dev_flush_ctx;

		if (hw_device->hw_intf->hw_ops.stop) {
			/*
			 * NOTE(review): only hw_ctx is filled in here, unlike
			 * the submit path's stop which also sets
			 * ctx_hw_private/hw_req_private; confirm the stop
			 * handler tolerates the unset fields.
			 */
			hw_stop_args.hw_ctx = hw_ctx;
			rc = hw_device->hw_intf->hw_ops.stop(
				hw_device->hw_intf->hw_priv, &hw_stop_args,
				sizeof(hw_stop_args));
			if (rc) {
				CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
				goto unlock_dev_flush_ctx;
			}
			hw_device->ready_to_process = true;
		}

unlock_dev_flush_ctx:
		mutex_unlock(&hw_device->lock);
	}
	mutex_unlock(&hw_mgr->frame_req_mutex);

	/* Pending (not yet configured) requests go straight to the free list. */
	for (i = 0; i < flush_args->num_req_pending; i++) {
		flush_req = (struct cam_fd_mgr_frame_request *)
			flush_args->flush_req_pending[i];
		cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
			&flush_req);
	}

	return rc;
}
+
+static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
+ void *hw_flush_args)
+{
+ int rc = 0;
+ struct cam_hw_flush_args *flush_args =
+ (struct cam_hw_flush_args *)hw_flush_args;
+
+ if (!hw_mgr_priv || !hw_flush_args) {
+ CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+ hw_mgr_priv, hw_flush_args);
+ return -EINVAL;
+ }
+
+ switch (flush_args->flush_type) {
+ case CAM_FLUSH_TYPE_REQ:
+ rc = cam_fd_mgr_hw_flush_req(hw_mgr_priv, flush_args);
+ break;
+ case CAM_FLUSH_TYPE_ALL:
+ rc = cam_fd_mgr_hw_flush_ctx(hw_mgr_priv, flush_args);
+ break;
+ default:
+ rc = -EINVAL;
+ CAM_ERR(CAM_FD, "Invalid flush type %d",
+ flush_args->flush_type);
+ break;
+ }
+ return rc;
+}
+
+static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
+{
+ struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+ struct cam_hw_stop_args *hw_mgr_stop_args =
+ (struct cam_hw_stop_args *)mgr_stop_args;
+ struct cam_fd_hw_mgr_ctx *hw_ctx;
+ struct cam_fd_device *hw_device;
+ struct cam_fd_hw_deinit_args hw_deinit_args;
+ int rc = 0;
+
+ if (!hw_mgr_priv || !hw_mgr_stop_args) {
+ CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+ hw_mgr_priv, hw_mgr_stop_args);
+ return -EINVAL;
+ }
+
+ hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_stop_args->ctxt_to_hw_map;
+ if (!hw_ctx || !hw_ctx->ctx_in_use) {
+ CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+ return -EPERM;
+ }
+ CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
+ hw_ctx->device_index);
+
+ rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+ return rc;
+ }
+
+ CAM_DBG(CAM_FD, "FD Device ready_to_process = %d",
+ hw_device->ready_to_process);
+
+ if (hw_device->hw_intf->hw_ops.deinit) {
+ hw_deinit_args.hw_ctx = hw_ctx;
+ hw_deinit_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+ rc = hw_device->hw_intf->hw_ops.deinit(
+ hw_device->hw_intf->hw_priv, &hw_deinit_args,
+ sizeof(hw_deinit_args));
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in HW DeInit %d", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
/*
 * cam_fd_mgr_hw_prepare_update() - validate a packet and prepare everything
 * needed to run one FD request.
 *
 * Pipeline: validate packet -> get KMD buffer -> patch packet -> parse the
 * generic command buffer into prestart args -> prepare IO buffer info ->
 * build HW update entries -> allocate a frame request and stash it in
 * prepare->priv (handed back as config->priv at hw_config time).
 *
 * NOTE(review): error paths mix "return rc" and "goto error"; both are
 * equivalent since the error label only returns rc.
 */
static int cam_fd_mgr_hw_prepare_update(void *hw_mgr_priv,
	void *hw_prepare_update_args)
{
	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
	struct cam_hw_prepare_update_args *prepare =
		(struct cam_hw_prepare_update_args *) hw_prepare_update_args;
	struct cam_fd_hw_mgr_ctx *hw_ctx;
	struct cam_fd_device *hw_device;
	struct cam_kmd_buf_info kmd_buf;
	int rc;
	struct cam_fd_hw_cmd_prestart_args prestart_args;
	struct cam_fd_mgr_frame_request *frame_req;

	if (!hw_mgr_priv || !hw_prepare_update_args) {
		CAM_ERR(CAM_FD, "Invalid args %pK %pK",
			hw_mgr_priv, hw_prepare_update_args);
		return -EINVAL;
	}

	hw_ctx = (struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map;
	if (!hw_ctx || !hw_ctx->ctx_in_use) {
		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
		return -EPERM;
	}

	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
		goto error;
	}

	rc = cam_fd_mgr_util_packet_validate(prepare->packet);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in packet validation %d", rc);
		goto error;
	}

	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in get kmd buf buffer %d", rc);
		goto error;
	}

	CAM_DBG(CAM_FD,
		"KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d",
		kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset,
		kmd_buf.size, kmd_buf.used_bytes);

	/* We do not expect any patching, but just do it anyway */
	rc = cam_packet_util_process_patches(prepare->packet,
		hw_mgr->device_iommu.non_secure, -1);
	if (rc) {
		CAM_ERR(CAM_FD, "Patch FD packet failed, rc=%d", rc);
		return rc;
	}

	/* Seed the prestart args that the HW layer will fill in. */
	memset(&prestart_args, 0x0, sizeof(prestart_args));
	prestart_args.ctx_hw_private = hw_ctx->ctx_hw_private;
	prestart_args.hw_ctx = hw_ctx;
	prestart_args.request_id = prepare->packet->header.request_id;

	rc = cam_fd_mgr_util_parse_generic_cmd_buffer(hw_ctx, prepare->packet,
		&prestart_args);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in parsing gerneric cmd buffer %d", rc);
		goto error;
	}

	/* Map the packet's IO configs into prestart input/output buffers. */
	rc = cam_fd_mgr_util_prepare_io_buf_info(
		hw_mgr->device_iommu.non_secure, prepare,
		prestart_args.input_buf, prestart_args.output_buf,
		CAM_FD_MAX_IO_BUFFERS);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in prepare IO Buf %d", rc);
		goto error;
	}

	rc = cam_fd_mgr_util_prepare_hw_update_entries(hw_mgr, prepare,
		&prestart_args, &kmd_buf);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in hw update entries %d", rc);
		goto error;
	}

	/* get a free frame req from free list */
	rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_free_list,
		&frame_req);
	if (rc || !frame_req) {
		CAM_ERR(CAM_FD, "Get frame_req failed, rc=%d, hw_ctx=%pK",
			rc, hw_ctx);
		return -ENOMEM;
	}

	/* Setup frame request info and queue to pending list */
	frame_req->hw_ctx = hw_ctx;
	frame_req->request_id = prepare->packet->header.request_id;
	/* This has to be passed to HW while calling hw_ops->start */
	frame_req->hw_req_private = prestart_args.hw_req_private;

	/*
	 * Save the current frame_req into priv,
	 * this will come as priv while hw_config
	 */
	prepare->priv = frame_req;

	CAM_DBG(CAM_FD, "FramePrepare : Frame[%lld]", frame_req->request_id);

	return 0;
error:
	return rc;
}
+
+static int cam_fd_mgr_hw_config(void *hw_mgr_priv, void *hw_config_args)
+{
+ struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+ struct cam_hw_config_args *config =
+ (struct cam_hw_config_args *) hw_config_args;
+ struct cam_fd_hw_mgr_ctx *hw_ctx;
+ struct cam_fd_mgr_frame_request *frame_req;
+ int rc;
+ int i;
+
+ if (!hw_mgr || !config) {
+ CAM_ERR(CAM_FD, "Invalid arguments %pK %pK", hw_mgr, config);
+ return -EINVAL;
+ }
+
+ if (!config->num_hw_update_entries) {
+ CAM_ERR(CAM_FD, "No hw update enteries are available");
+ return -EINVAL;
+ }
+
+ hw_ctx = (struct cam_fd_hw_mgr_ctx *)config->ctxt_to_hw_map;
+ if (!hw_ctx || !hw_ctx->ctx_in_use) {
+ CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+ return -EPERM;
+ }
+
+ frame_req = config->priv;
+
+ trace_cam_apply_req("FD", frame_req->request_id);
+ CAM_DBG(CAM_FD, "FrameHWConfig : Frame[%lld]", frame_req->request_id);
+
+ frame_req->num_hw_update_entries = config->num_hw_update_entries;
+ for (i = 0; i < config->num_hw_update_entries; i++) {
+ frame_req->hw_update_entries[i] = config->hw_update_entries[i];
+ CAM_DBG(CAM_FD, "PreStart HWEntry[%d] : %d %d %d %d %pK",
+ frame_req->hw_update_entries[i].handle,
+ frame_req->hw_update_entries[i].offset,
+ frame_req->hw_update_entries[i].len,
+ frame_req->hw_update_entries[i].flags,
+ frame_req->hw_update_entries[i].addr);
+ }
+
+ if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) {
+ CAM_DBG(CAM_FD, "Insert frame into prio0 queue");
+ rc = cam_fd_mgr_util_put_frame_req(
+ &hw_mgr->frame_pending_list_high, &frame_req);
+ } else {
+ CAM_DBG(CAM_FD, "Insert frame into prio1 queue");
+ rc = cam_fd_mgr_util_put_frame_req(
+ &hw_mgr->frame_pending_list_normal, &frame_req);
+ }
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in queuing frame req, rc=%d", rc);
+ goto put_free_list;
+ }
+
+ rc = cam_fd_mgr_util_schedule_frame_worker_task(hw_mgr);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Worker task scheduling failed %d", rc);
+ goto remove_and_put_free_list;
+ }
+
+ return 0;
+
+remove_and_put_free_list:
+
+ if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) {
+ CAM_DBG(CAM_FD, "Removing frame into prio0 queue");
+ cam_fd_mgr_util_get_frame_req(
+ &hw_mgr->frame_pending_list_high, &frame_req);
+ } else {
+ CAM_DBG(CAM_FD, "Removing frame into prio1 queue");
+ cam_fd_mgr_util_get_frame_req(
+ &hw_mgr->frame_pending_list_normal, &frame_req);
+ }
+put_free_list:
+ cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+ &frame_req);
+
+ return rc;
+}
+
+int cam_fd_hw_mgr_deinit(struct device_node *of_node)
+{
+ CAM_DBG(CAM_FD, "HW Mgr Deinit");
+
+ cam_req_mgr_workq_destroy(&g_fd_hw_mgr.work);
+
+ cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_DETACH);
+ cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure);
+ g_fd_hw_mgr.device_iommu.non_secure = -1;
+
+ mutex_destroy(&g_fd_hw_mgr.ctx_mutex);
+ mutex_destroy(&g_fd_hw_mgr.frame_req_mutex);
+ mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex);
+
+ return 0;
+}
+
+int cam_fd_hw_mgr_init(struct device_node *of_node,
+ struct cam_hw_mgr_intf *hw_mgr_intf)
+{
+ int count, i, rc = 0;
+ struct cam_hw_intf *hw_intf = NULL;
+ struct cam_fd_hw_mgr_ctx *hw_mgr_ctx;
+ struct cam_fd_device *hw_device;
+ struct cam_fd_mgr_frame_request *frame_req;
+
+ if (!of_node || !hw_mgr_intf) {
+ CAM_ERR(CAM_FD, "Invalid args of_node %pK hw_mgr_intf %pK",
+ of_node, hw_mgr_intf);
+ return -EINVAL;
+ }
+
+ memset(&g_fd_hw_mgr, 0x0, sizeof(g_fd_hw_mgr));
+ memset(hw_mgr_intf, 0x0, sizeof(*hw_mgr_intf));
+
+ mutex_init(&g_fd_hw_mgr.ctx_mutex);
+ mutex_init(&g_fd_hw_mgr.frame_req_mutex);
+ mutex_init(&g_fd_hw_mgr.hw_mgr_mutex);
+ spin_lock_init(&g_fd_hw_mgr.hw_mgr_slock);
+
+ count = of_property_count_strings(of_node, "compat-hw-name");
+ if (!count || (count > CAM_FD_HW_MAX)) {
+ CAM_ERR(CAM_FD, "Invalid compat names in dev tree %d", count);
+ return -EINVAL;
+ }
+ g_fd_hw_mgr.num_devices = count;
+
+ g_fd_hw_mgr.raw_results_available = false;
+ g_fd_hw_mgr.supported_modes = 0;
+
+ for (i = 0; i < count; i++) {
+ hw_device = &g_fd_hw_mgr.hw_device[i];
+
+ rc = cam_fd_mgr_util_pdev_get_hw_intf(of_node, i, &hw_intf);
+ if (rc) {
+ CAM_ERR(CAM_FD, "hw intf from pdev failed, rc=%d", rc);
+ return rc;
+ }
+
+ mutex_init(&hw_device->lock);
+
+ hw_device->valid = true;
+ hw_device->hw_intf = hw_intf;
+ hw_device->ready_to_process = true;
+
+ if (hw_device->hw_intf->hw_ops.process_cmd) {
+ struct cam_fd_hw_cmd_set_irq_cb irq_cb_args;
+
+ irq_cb_args.cam_fd_hw_mgr_cb = cam_fd_mgr_irq_cb;
+ irq_cb_args.data = hw_device;
+
+ rc = hw_device->hw_intf->hw_ops.process_cmd(
+ hw_device->hw_intf->hw_priv,
+ CAM_FD_HW_CMD_REGISTER_CALLBACK,
+ &irq_cb_args, sizeof(irq_cb_args));
+ if (rc) {
+ CAM_ERR(CAM_FD,
+ "Failed in REGISTER_CALLBACK %d", rc);
+ return rc;
+ }
+ }
+
+ if (hw_device->hw_intf->hw_ops.get_hw_caps) {
+ rc = hw_device->hw_intf->hw_ops.get_hw_caps(
+ hw_intf->hw_priv, &hw_device->hw_caps,
+ sizeof(hw_device->hw_caps));
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in get_hw_caps %d", rc);
+ return rc;
+ }
+
+ g_fd_hw_mgr.raw_results_available |=
+ hw_device->hw_caps.raw_results_available;
+ g_fd_hw_mgr.supported_modes |=
+ hw_device->hw_caps.supported_modes;
+
+ CAM_DBG(CAM_FD,
+ "Device[mode=%d, raw=%d], Mgr[mode=%d, raw=%d]",
+ hw_device->hw_caps.supported_modes,
+ hw_device->hw_caps.raw_results_available,
+ g_fd_hw_mgr.supported_modes,
+ g_fd_hw_mgr.raw_results_available);
+ }
+ }
+
+ INIT_LIST_HEAD(&g_fd_hw_mgr.free_ctx_list);
+ INIT_LIST_HEAD(&g_fd_hw_mgr.used_ctx_list);
+ INIT_LIST_HEAD(&g_fd_hw_mgr.frame_free_list);
+ INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_high);
+ INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_normal);
+ INIT_LIST_HEAD(&g_fd_hw_mgr.frame_processing_list);
+
+ g_fd_hw_mgr.device_iommu.non_secure = -1;
+ g_fd_hw_mgr.device_iommu.secure = -1;
+ g_fd_hw_mgr.cdm_iommu.non_secure = -1;
+ g_fd_hw_mgr.cdm_iommu.secure = -1;
+
+ rc = cam_smmu_get_handle("fd",
+ &g_fd_hw_mgr.device_iommu.non_secure);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Get iommu handle failed, rc=%d", rc);
+ goto destroy_mutex;
+ }
+
+ rc = cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_ATTACH);
+ if (rc) {
+ CAM_ERR(CAM_FD, "FD attach iommu handle failed, rc=%d", rc);
+ goto destroy_smmu;
+ }
+
+ rc = cam_cdm_get_iommu_handle("fd", &g_fd_hw_mgr.cdm_iommu);
+ if (rc)
+ CAM_DBG(CAM_FD, "Failed to acquire the CDM iommu handles");
+
+ CAM_DBG(CAM_FD, "iommu handles : device(%d, %d), cdm(%d, %d)",
+ g_fd_hw_mgr.device_iommu.non_secure,
+ g_fd_hw_mgr.device_iommu.secure,
+ g_fd_hw_mgr.cdm_iommu.non_secure,
+ g_fd_hw_mgr.cdm_iommu.secure);
+
+ /* Init hw mgr contexts and add to free list */
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ hw_mgr_ctx = &g_fd_hw_mgr.ctx_pool[i];
+
+ memset(hw_mgr_ctx, 0x0, sizeof(*hw_mgr_ctx));
+ INIT_LIST_HEAD(&hw_mgr_ctx->list);
+
+ hw_mgr_ctx->ctx_index = i;
+ hw_mgr_ctx->device_index = -1;
+ hw_mgr_ctx->hw_mgr = &g_fd_hw_mgr;
+
+ list_add_tail(&hw_mgr_ctx->list, &g_fd_hw_mgr.free_ctx_list);
+ }
+
+ /* Init hw mgr frame requests and add to free list */
+ for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+ frame_req = &g_fd_hw_mgr.frame_req[i];
+
+ memset(frame_req, 0x0, sizeof(*frame_req));
+ INIT_LIST_HEAD(&frame_req->list);
+
+ list_add_tail(&frame_req->list, &g_fd_hw_mgr.frame_free_list);
+ }
+
+ rc = cam_req_mgr_workq_create("cam_fd_worker", CAM_FD_WORKQ_NUM_TASK,
+ &g_fd_hw_mgr.work, CRM_WORKQ_USAGE_IRQ, 0);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Unable to create a worker, rc=%d", rc);
+ goto detach_smmu;
+ }
+
+ for (i = 0; i < CAM_FD_WORKQ_NUM_TASK; i++)
+ g_fd_hw_mgr.work->task.pool[i].payload =
+ &g_fd_hw_mgr.work_data[i];
+
+ /* Setup hw cap so that we can just return the info when requested */
+ memset(&g_fd_hw_mgr.fd_caps, 0, sizeof(g_fd_hw_mgr.fd_caps));
+ g_fd_hw_mgr.fd_caps.device_iommu = g_fd_hw_mgr.device_iommu;
+ g_fd_hw_mgr.fd_caps.cdm_iommu = g_fd_hw_mgr.cdm_iommu;
+ g_fd_hw_mgr.fd_caps.hw_caps = g_fd_hw_mgr.hw_device[0].hw_caps;
+
+ CAM_DBG(CAM_FD,
+ "IOMMU device(%d, %d), CDM(%d, %d) versions core[%d.%d], wrapper[%d.%d]",
+ g_fd_hw_mgr.fd_caps.device_iommu.secure,
+ g_fd_hw_mgr.fd_caps.device_iommu.non_secure,
+ g_fd_hw_mgr.fd_caps.cdm_iommu.secure,
+ g_fd_hw_mgr.fd_caps.cdm_iommu.non_secure,
+ g_fd_hw_mgr.fd_caps.hw_caps.core_version.major,
+ g_fd_hw_mgr.fd_caps.hw_caps.core_version.minor,
+ g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.major,
+ g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.minor);
+
+ hw_mgr_intf->hw_mgr_priv = &g_fd_hw_mgr;
+ hw_mgr_intf->hw_get_caps = cam_fd_mgr_hw_get_caps;
+ hw_mgr_intf->hw_acquire = cam_fd_mgr_hw_acquire;
+ hw_mgr_intf->hw_release = cam_fd_mgr_hw_release;
+ hw_mgr_intf->hw_start = cam_fd_mgr_hw_start;
+ hw_mgr_intf->hw_stop = cam_fd_mgr_hw_stop;
+ hw_mgr_intf->hw_prepare_update = cam_fd_mgr_hw_prepare_update;
+ hw_mgr_intf->hw_config = cam_fd_mgr_hw_config;
+ hw_mgr_intf->hw_read = NULL;
+ hw_mgr_intf->hw_write = NULL;
+ hw_mgr_intf->hw_close = NULL;
+ hw_mgr_intf->hw_flush = cam_fd_mgr_hw_flush;
+
+ return rc;
+
+detach_smmu:
+ cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_DETACH);
+destroy_smmu:
+ cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure);
+ g_fd_hw_mgr.device_iommu.non_secure = -1;
+destroy_mutex:
+ mutex_destroy(&g_fd_hw_mgr.ctx_mutex);
+ mutex_destroy(&g_fd_hw_mgr.frame_req_mutex);
+ mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
new file mode 100644
index 000000000000..a903ab091c73
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
@@ -0,0 +1,186 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_MGR_H_
+#define _CAM_FD_HW_MGR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <media/cam_fd.h>
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_fd_hw_intf.h"
+
+#define CAM_FD_HW_MAX 1
+#define CAM_FD_WORKQ_NUM_TASK 10
+
+struct cam_fd_hw_mgr;
+
+/**
+ * enum cam_fd_mgr_work_type - Type of worker task
+ *
+ * @CAM_FD_WORK_FRAME : Work type indicating frame task
+ * @CAM_FD_WORK_IRQ : Work type indicating irq task
+ */
+enum cam_fd_mgr_work_type {
+ CAM_FD_WORK_FRAME,
+ CAM_FD_WORK_IRQ,
+};
+
+/**
+ * struct cam_fd_hw_mgr_ctx : FD HW Mgr context
+ *
+ * @list : List pointer used to maintain this context
+ * in free, used list
+ * @ctx_index : Index of this context
+ * @ctx_in_use : Whether this context is in use
+ * @event_cb : Event callback pointer to notify cam core context
+ * @cb_priv : Event callback private pointer
+ * @hw_mgr : Pointer to hw manager
+ * @get_raw_results : Whether this context needs raw results
+ * @mode : Mode in which this context runs
+ * @device_index : HW Device used by this context
+ * @ctx_hw_private : HW layer's private context pointer for this context
+ * @priority : Priority of this context
+ */
+struct cam_fd_hw_mgr_ctx {
+ struct list_head list;
+ uint32_t ctx_index;
+ bool ctx_in_use;
+ cam_hw_event_cb_func event_cb;
+ void *cb_priv;
+ struct cam_fd_hw_mgr *hw_mgr;
+ bool get_raw_results;
+ enum cam_fd_hw_mode mode;
+ int32_t device_index;
+ void *ctx_hw_private;
+ uint32_t priority;
+};
+
+/**
+ * struct cam_fd_device : FD HW Device
+ *
+ * @hw_caps : This FD device's capabilities
+ * @hw_intf : FD device's interface information
+ * @ready_to_process : Whether this device is ready to process next frame
+ * @num_ctxts : Number of context currently running on this device
+ * @valid : Whether this device is valid
+ * @lock                 : Lock used for protection
+ * @cur_hw_ctx : current hw context running in the device
+ * @req_id : current processing req id
+ */
+struct cam_fd_device {
+ struct cam_fd_hw_caps hw_caps;
+ struct cam_hw_intf *hw_intf;
+ bool ready_to_process;
+ uint32_t num_ctxts;
+ bool valid;
+ struct mutex lock;
+ struct cam_fd_hw_mgr_ctx *cur_hw_ctx;
+ int64_t req_id;
+};
+
+/**
+ * struct cam_fd_mgr_frame_request : Frame request information maintained
+ * in HW Mgr layer
+ *
+ * @list : List pointer used to maintain this request in
+ * free, pending, processing request lists
+ * @request_id : Request ID corresponding to this request
+ * @hw_ctx : HW context from which this request is coming
+ * @hw_req_private : HW layer's private information specific to
+ * this request
+ * @hw_update_entries : HW update entries corresponding to this request
+ * which needs to be submitted to HW through CDM
+ * @num_hw_update_entries : Number of HW update entries
+ */
+struct cam_fd_mgr_frame_request {
+ struct list_head list;
+ uint64_t request_id;
+ struct cam_fd_hw_mgr_ctx *hw_ctx;
+ struct cam_fd_hw_req_private hw_req_private;
+ struct cam_hw_update_entry hw_update_entries[CAM_FD_MAX_HW_ENTRIES];
+ uint32_t num_hw_update_entries;
+};
+
+/**
+ * struct cam_fd_mgr_work_data : HW Mgr work data information
+ *
+ * @type : Type of work
+ * @irq_type : IRQ type when this work is queued because of irq callback
+ */
+struct cam_fd_mgr_work_data {
+ enum cam_fd_mgr_work_type type;
+ enum cam_fd_hw_irq_type irq_type;
+};
+
+/**
+ * struct cam_fd_hw_mgr : FD HW Mgr information
+ *
+ * @free_ctx_list : List of free contexts available for acquire
+ * @used_ctx_list : List of contexts that are acquired
+ * @frame_free_list : List of free frame requests available
+ * @frame_pending_list_high : List of high priority frame requests pending
+ * for processing
+ * @frame_pending_list_normal : List of normal priority frame requests pending
+ * for processing
+ * @frame_processing_list     : List of frame requests currently being
+ *                              processed. Generally maximum one
+ *                              request would be present in this list
+ * @hw_mgr_mutex : Mutex to protect hw mgr data when accessed
+ * from multiple threads
+ * @hw_mgr_slock : Spin lock to protect hw mgr data when accessed
+ * from multiple threads
+ * @ctx_mutex : Mutex to protect context list
+ * @frame_req_mutex : Mutex to protect frame request list
+ * @device_iommu : Device IOMMU information
+ * @cdm_iommu : CDM IOMMU information
+ * @hw_device : Underlying HW device information
+ * @num_devices : Number of HW devices available
+ * @raw_results_available : Whether raw results available in this driver
+ * @supported_modes : Supported modes by this driver
+ * @ctx_pool : List of context
+ * @frame_req : List of frame requests
+ * @work : Worker handle
+ * @work_data : Worker data
+ * @fd_caps : FD driver capabilities
+ */
+struct cam_fd_hw_mgr {
+ struct list_head free_ctx_list;
+ struct list_head used_ctx_list;
+ struct list_head frame_free_list;
+ struct list_head frame_pending_list_high;
+ struct list_head frame_pending_list_normal;
+ struct list_head frame_processing_list;
+ struct mutex hw_mgr_mutex;
+ spinlock_t hw_mgr_slock;
+ struct mutex ctx_mutex;
+ struct mutex frame_req_mutex;
+ struct cam_iommu_handle device_iommu;
+ struct cam_iommu_handle cdm_iommu;
+ struct cam_fd_device hw_device[CAM_FD_HW_MAX];
+ uint32_t num_devices;
+ bool raw_results_available;
+ uint32_t supported_modes;
+ struct cam_fd_hw_mgr_ctx ctx_pool[CAM_CTX_MAX];
+ struct cam_fd_mgr_frame_request frame_req[CAM_CTX_REQ_MAX];
+ struct cam_req_mgr_core_workq *work;
+ struct cam_fd_mgr_work_data work_data[CAM_FD_WORKQ_NUM_TASK];
+ struct cam_fd_query_cap_cmd fd_caps;
+};
+
+#endif /* _CAM_FD_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h
new file mode 100644
index 000000000000..01e86b25a828
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_MGR_INTF_H_
+#define _CAM_FD_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+
+int cam_fd_hw_mgr_init(struct device_node *of_node,
+ struct cam_hw_mgr_intf *hw_mgr_intf);
+int cam_fd_hw_mgr_deinit(struct device_node *of_node);
+
+#endif /* _CAM_FD_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/Makefile
new file mode 100644
index 000000000000..7fe5a4067d02
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_fd
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_hw_dev.o cam_fd_hw_core.o cam_fd_hw_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
new file mode 100644
index 000000000000..12d3c6bf1060
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
@@ -0,0 +1,1168 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_trace.h"
+
+#define CAM_FD_REG_VAL_PAIR_SIZE 256
+
+static uint32_t cam_fd_cdm_write_reg_val_pair(uint32_t *buffer,
+ uint32_t index, uint32_t reg_offset, uint32_t reg_value)
+{
+ buffer[index++] = reg_offset;
+ buffer[index++] = reg_value;
+
+ CAM_DBG(CAM_FD, "FD_CDM_CMD: Base[FD_CORE] Offset[0x%8x] Value[0x%8x]",
+ reg_offset, reg_value);
+
+ return index;
+}
+
+static void cam_fd_hw_util_cdm_callback(uint32_t handle, void *userdata,
+ enum cam_cdm_cb_status status, uint64_t cookie)
+{
+ trace_cam_cdm_cb("FD", status);
+ CAM_DBG(CAM_FD, "CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
+ handle, userdata, status, cookie);
+}
+
+static void cam_fd_hw_util_enable_power_on_settings(struct cam_hw_info *fd_hw)
+{
+ struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+ struct cam_fd_hw_static_info *hw_static_info =
+ ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+
+ if (hw_static_info->enable_errata_wa.single_irq_only == false) {
+ /* Enable IRQs here */
+ cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+ hw_static_info->wrapper_regs.irq_mask,
+ hw_static_info->irq_mask);
+ }
+
+ /* QoS settings */
+ cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+ hw_static_info->wrapper_regs.vbif_req_priority,
+ hw_static_info->qos_priority);
+ cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+ hw_static_info->wrapper_regs.vbif_priority_level,
+ hw_static_info->qos_priority_level);
+}
+
+int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
+ struct cam_fd_hw_caps *hw_caps)
+{
+ struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+ struct cam_fd_hw_static_info *hw_static_info =
+ ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+ uint32_t reg_value;
+
+ if (!hw_static_info) {
+ CAM_ERR(CAM_FD, "Invalid hw info data");
+ return -EINVAL;
+ }
+
+ reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_CORE,
+ hw_static_info->core_regs.version);
+ hw_caps->core_version.major =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf00, 0x8);
+ hw_caps->core_version.minor =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0, 0x4);
+ hw_caps->core_version.incr =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf, 0x0);
+
+ reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
+ hw_static_info->wrapper_regs.wrapper_version);
+ hw_caps->wrapper_version.major =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+ hw_caps->wrapper_version.minor =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->wrapper_version.incr =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ hw_caps->raw_results_available =
+ hw_static_info->results.raw_results_available;
+ hw_caps->supported_modes = hw_static_info->supported_modes;
+
+ CAM_DBG(CAM_FD, "core:%d.%d.%d wrapper:%d.%d.%d intermediate:%d",
+ hw_caps->core_version.major, hw_caps->core_version.minor,
+ hw_caps->core_version.incr, hw_caps->wrapper_version.major,
+ hw_caps->wrapper_version.minor, hw_caps->wrapper_version.incr,
+ hw_caps->raw_results_available);
+
+ return 0;
+}
+
+static int cam_fd_hw_util_fdwrapper_sync_reset(struct cam_hw_info *fd_hw)
+{
+ struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+ struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+ struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+ long time_left;
+
+ /* Before triggering reset to HW, clear the reset complete */
+ reinit_completion(&fd_core->reset_complete);
+
+ cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
+ hw_static_info->core_regs.control, 0x1);
+
+ if (hw_static_info->enable_errata_wa.single_irq_only) {
+ cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+ hw_static_info->wrapper_regs.irq_mask,
+ CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE));
+ }
+
+ cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+ hw_static_info->wrapper_regs.sw_reset, 0x1);
+
+ time_left = wait_for_completion_timeout(&fd_core->reset_complete,
+ msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
+ if (time_left <= 0)
+ CAM_WARN(CAM_FD, "HW reset timeout time_left=%ld", time_left);
+
+ CAM_DBG(CAM_FD, "FD Wrapper SW Sync Reset complete");
+
+ return 0;
+}
+
+
+static int cam_fd_hw_util_fdwrapper_halt(struct cam_hw_info *fd_hw)
+{
+ struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+ struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+ struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+ long time_left;
+
+ /* Before triggering halt to HW, clear halt complete */
+ reinit_completion(&fd_core->halt_complete);
+
+ if (hw_static_info->enable_errata_wa.single_irq_only) {
+ cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+ hw_static_info->wrapper_regs.irq_mask,
+ CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE));
+ }
+
+ cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+ hw_static_info->wrapper_regs.hw_stop, 0x1);
+
+ time_left = wait_for_completion_timeout(&fd_core->halt_complete,
+ msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
+ if (time_left <= 0)
+ CAM_WARN(CAM_FD, "HW halt timeout time_left=%ld", time_left);
+
+ CAM_DBG(CAM_FD, "FD Wrapper Halt complete");
+
+ return 0;
+}
+
+static int cam_fd_hw_util_processcmd_prestart(struct cam_hw_info *fd_hw,
+ struct cam_fd_hw_cmd_prestart_args *prestart_args)
+{
+ struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+ struct cam_fd_hw_static_info *hw_static_info =
+ ((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+ struct cam_fd_ctx_hw_private *ctx_hw_private =
+ prestart_args->ctx_hw_private;
+ uint32_t size, size_required = 0;
+ uint32_t mem_base;
+ uint32_t *cmd_buf_addr = prestart_args->cmd_buf_addr;
+ uint32_t reg_val_pair[CAM_FD_REG_VAL_PAIR_SIZE];
+ uint32_t num_cmds = 0;
+ int i;
+ struct cam_fd_hw_io_buffer *io_buf;
+ struct cam_fd_hw_req_private *req_private;
+ uint32_t available_size = prestart_args->size;
+ bool work_buffer_configured = false;
+
+ if (!ctx_hw_private || !cmd_buf_addr) {
+ CAM_ERR(CAM_FD, "Invalid input prestart args %pK %pK",
+ ctx_hw_private, cmd_buf_addr);
+ return -EINVAL;
+ }
+
+ if (prestart_args->get_raw_results &&
+ !hw_static_info->results.raw_results_available) {
+ CAM_ERR(CAM_FD, "Raw results not supported %d %d",
+ prestart_args->get_raw_results,
+ hw_static_info->results.raw_results_available);
+ return -EINVAL;
+ }
+
+ req_private = &prestart_args->hw_req_private;
+ req_private->ctx_hw_private = prestart_args->ctx_hw_private;
+ req_private->request_id = prestart_args->request_id;
+ req_private->get_raw_results = prestart_args->get_raw_results;
+ req_private->fd_results = NULL;
+ req_private->raw_results = NULL;
+
+ /* Start preparing CDM register values that KMD has to insert */
+ num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
+ hw_static_info->core_regs.control, 0x1);
+ num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
+ hw_static_info->core_regs.control, 0x0);
+
+ for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
+ io_buf = &prestart_args->input_buf[i];
+
+ if (io_buf->valid == false)
+ break;
+
+ if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
+ CAM_ERR(CAM_FD, "Incorrect direction %d %d",
+ io_buf->io_cfg->direction, CAM_BUF_INPUT);
+ return -EINVAL;
+ }
+
+ switch (io_buf->io_cfg->resource_type) {
+ case CAM_FD_INPUT_PORT_ID_IMAGE: {
+ if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
+ CAM_ERR(CAM_FD,
+ "Invalid reg_val pair size %d, %d",
+ num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
+ return -EINVAL;
+ }
+
+ num_cmds = cam_fd_cdm_write_reg_val_pair(
+ reg_val_pair, num_cmds,
+ hw_static_info->core_regs.image_addr,
+ io_buf->io_addr[0]);
+ break;
+ }
+ default:
+ CAM_ERR(CAM_FD, "Invalid resource type %d",
+ io_buf->io_cfg->resource_type);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
+ io_buf = &prestart_args->output_buf[i];
+
+ if (io_buf->valid == false)
+ break;
+
+ if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
+ CAM_ERR(CAM_FD, "Incorrect direction %d %d",
+ io_buf->io_cfg->direction, CAM_BUF_INPUT);
+ return -EINVAL;
+ }
+
+ switch (io_buf->io_cfg->resource_type) {
+ case CAM_FD_OUTPUT_PORT_ID_RESULTS: {
+ uint32_t face_results_offset;
+
+ size_required = hw_static_info->results.max_faces *
+ hw_static_info->results.per_face_entries * 4;
+
+ if (io_buf->io_cfg->planes[0].plane_stride <
+ size_required) {
+ CAM_ERR(CAM_FD, "Invalid results size %d %d",
+ io_buf->io_cfg->planes[0].plane_stride,
+ size_required);
+ return -EINVAL;
+ }
+
+ req_private->fd_results =
+ (struct cam_fd_results *)io_buf->cpu_addr[0];
+
+ face_results_offset =
+ (uint8_t *)&req_private->fd_results->faces[0] -
+ (uint8_t *)req_private->fd_results;
+
+ if (hw_static_info->ro_mode_supported) {
+ if ((num_cmds + 4) > CAM_FD_REG_VAL_PAIR_SIZE) {
+ CAM_ERR(CAM_FD,
+ "Invalid reg_val size %d, %d",
+ num_cmds,
+ CAM_FD_REG_VAL_PAIR_SIZE);
+ return -EINVAL;
+ }
+ /*
+ * Face data actually starts 16bytes later in
+				 * the io buffer. Check cam_fd_results.
+ */
+ num_cmds = cam_fd_cdm_write_reg_val_pair(
+ reg_val_pair, num_cmds,
+ hw_static_info->core_regs.result_addr,
+ io_buf->io_addr[0] +
+ face_results_offset);
+ num_cmds = cam_fd_cdm_write_reg_val_pair(
+ reg_val_pair, num_cmds,
+ hw_static_info->core_regs.ro_mode,
+ 0x1);
+
+ req_private->ro_mode_enabled = true;
+ } else {
+ req_private->ro_mode_enabled = false;
+ }
+ break;
+ }
+ case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS: {
+ size_required =
+ hw_static_info->results.raw_results_entries *
+ sizeof(uint32_t);
+
+ if (io_buf->io_cfg->planes[0].plane_stride <
+ size_required) {
+ CAM_ERR(CAM_FD, "Invalid results size %d %d",
+ io_buf->io_cfg->planes[0].plane_stride,
+ size_required);
+ return -EINVAL;
+ }
+
+ req_private->raw_results =
+ (uint32_t *)io_buf->cpu_addr[0];
+ break;
+ }
+ case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER: {
+ if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
+ CAM_ERR(CAM_FD,
+ "Invalid reg_val pair size %d, %d",
+ num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
+ return -EINVAL;
+ }
+
+ num_cmds = cam_fd_cdm_write_reg_val_pair(
+ reg_val_pair, num_cmds,
+ hw_static_info->core_regs.work_addr,
+ io_buf->io_addr[0]);
+
+ work_buffer_configured = true;
+ break;
+ }
+ default:
+ CAM_ERR(CAM_FD, "Invalid resource type %d",
+ io_buf->io_cfg->resource_type);
+ return -EINVAL;
+ }
+ }
+
+ if (!req_private->fd_results || !work_buffer_configured) {
+ CAM_ERR(CAM_FD, "Invalid IO Buffers results=%pK work=%d",
+ req_private->fd_results, work_buffer_configured);
+ return -EINVAL;
+ }
+
+ /* First insert CHANGE_BASE command */
+ size = ctx_hw_private->cdm_ops->cdm_required_size_changebase();
+ /* since cdm returns dwords, we need to convert it into bytes */
+ if ((size * 4) > available_size) {
+ CAM_ERR(CAM_FD, "buf size:%d is not sufficient, expected: %d",
+ prestart_args->size, size);
+ return -EINVAL;
+ }
+
+ mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info,
+ ((struct cam_fd_soc_private *)soc_info->soc_private)->
+ regbase_index[CAM_FD_REG_CORE]);
+
+ ctx_hw_private->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
+ cmd_buf_addr += size;
+ available_size -= (size * 4);
+
+ size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
+ num_cmds/2);
+ /* cdm util returns dwords, need to convert to bytes */
+ if ((size * 4) > available_size) {
+ CAM_ERR(CAM_FD, "Insufficient size:%d , expected size:%d",
+ available_size, size);
+ return -ENOMEM;
+ }
+ ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
+ reg_val_pair);
+ cmd_buf_addr += size;
+ available_size -= (size * 4);
+
+ /* Update pre_config_buf_size in bytes */
+ prestart_args->pre_config_buf_size =
+ prestart_args->size - available_size;
+
+ /* Insert start trigger command into CDM as post config commands. */
+ num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, 0,
+ hw_static_info->core_regs.control, 0x2);
+ size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
+ num_cmds/2);
+ if ((size * 4) > available_size) {
+ CAM_ERR(CAM_FD, "Insufficient size:%d , expected size:%d",
+ available_size, size);
+ return -ENOMEM;
+ }
+ ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
+ reg_val_pair);
+ cmd_buf_addr += size;
+ available_size -= (size * 4);
+
+ prestart_args->post_config_buf_size = size * 4;
+
+ CAM_DBG(CAM_FD, "PreConfig [%pK %d], PostConfig[%pK %d]",
+ prestart_args->cmd_buf_addr, prestart_args->pre_config_buf_size,
+ cmd_buf_addr, prestart_args->post_config_buf_size);
+
+ for (i = 0; i < (prestart_args->pre_config_buf_size +
+ prestart_args->post_config_buf_size) / 4; i++)
+ CAM_DBG(CAM_FD, "CDM KMD Commands [%d] : [%pK] [0x%x]", i,
+ &prestart_args->cmd_buf_addr[i],
+ prestart_args->cmd_buf_addr[i]);
+
+ return 0;
+}
+
+static int cam_fd_hw_util_processcmd_frame_done(struct cam_hw_info *fd_hw,
+ struct cam_fd_hw_frame_done_args *frame_done_args)
+{
+ struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+ struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+ struct cam_fd_hw_req_private *req_private;
+ uint32_t base, face_cnt;
+ uint32_t *buffer;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&fd_core->spin_lock, flags);
+ if ((fd_core->core_state != CAM_FD_CORE_STATE_IDLE) ||
+ (fd_core->results_valid == false) ||
+ !fd_core->hw_req_private) {
+ CAM_ERR(CAM_FD,
+ "Invalid state for results state=%d, results=%d %pK",
+ fd_core->core_state, fd_core->results_valid,
+ fd_core->hw_req_private);
+ spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+ return -EINVAL;
+ }
+ fd_core->core_state = CAM_FD_CORE_STATE_READING_RESULTS;
+ req_private = fd_core->hw_req_private;
+ spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+ /*
+ * Copy the register value as is into output buffers.
+	 * Whether we are copying the output data by reading registers or
+ * programming output buffer directly to HW must be transparent to UMD.
+ * In case HW supports writing face count value directly into
+ * DDR memory in future, these values should match.
+ */
+ req_private->fd_results->face_count =
+ cam_fd_soc_register_read(&fd_hw->soc_info, CAM_FD_REG_CORE,
+ hw_static_info->core_regs.result_cnt);
+
+ face_cnt = req_private->fd_results->face_count & 0x3F;
+
+ if (face_cnt > hw_static_info->results.max_faces) {
+ CAM_WARN(CAM_FD, "Face count greater than max %d %d",
+ face_cnt, hw_static_info->results.max_faces);
+ face_cnt = hw_static_info->results.max_faces;
+ }
+
+ CAM_DBG(CAM_FD, "ReqID[%lld] Faces Detected = %d",
+ req_private->request_id, face_cnt);
+
+ /*
+ * We need to read the face data information from registers only
+ * if one of below is true
+ * 1. RO mode is not set. i.e FD HW doesn't write face data into
+ * DDR memory
+ * 2. On the current chipset, results written into DDR memory by FD HW
+	 *    are not guaranteed to be correct
+ */
+ if (!req_private->ro_mode_enabled ||
+ hw_static_info->enable_errata_wa.ro_mode_results_invalid) {
+ buffer = (uint32_t *)&req_private->fd_results->faces[0];
+ base = hw_static_info->core_regs.results_reg_base;
+
+ /*
+ * Write register values as is into face data buffer. Its UMD
+ * driver responsibility to interpret the data and extract face
+ * properties from output buffer. Think in case output buffer
+ * is directly programmed to HW, then KMD has no control to
+ * extract the face properties and UMD anyway has to extract
+ * face properties. So we follow the same approach and keep
+ * this transparent to UMD.
+ */
+ for (i = 0;
+ i < (face_cnt *
+ hw_static_info->results.per_face_entries); i++) {
+ *buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
+ CAM_FD_REG_CORE, base + (i * 0x4));
+ CAM_DBG(CAM_FD, "FaceData[%d] : 0x%x", i / 4, *buffer);
+ buffer++;
+ }
+ }
+
+ if (req_private->get_raw_results &&
+ req_private->raw_results &&
+ hw_static_info->results.raw_results_available) {
+ buffer = req_private->raw_results;
+ base = hw_static_info->core_regs.raw_results_reg_base;
+
+ for (i = 0;
+ i < hw_static_info->results.raw_results_entries;
+ i++) {
+ *buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
+ CAM_FD_REG_CORE, base + (i * 0x4));
+ CAM_DBG(CAM_FD, "RawData[%d] : 0x%x", i, *buffer);
+ buffer++;
+ }
+ }
+
+ spin_lock_irqsave(&fd_core->spin_lock, flags);
+ fd_core->hw_req_private = NULL;
+ fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+ spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+ return 0;
+}
+
+/*
+ * cam_fd_hw_irq() - Top-half interrupt handler for the FD wrapper.
+ * @irq_num: IRQ line number (unused; identification comes from @data)
+ * @data:    struct cam_hw_info registered with the IRQ at SOC init time
+ *
+ * Reads the wrapper IRQ status, clears it back to HW, completes the
+ * waiter matching the asserted bit (halt/reset/frame-done), moves the
+ * core state back to IDLE for reset/frame-done, and forwards the event
+ * to the HW manager callback — except for HALT_DONE, which is consumed
+ * here because a reset always follows a halt.
+ */
+irqreturn_t cam_fd_hw_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)data;
+	struct cam_fd_core *fd_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_fd_hw_static_info *hw_static_info;
+	uint32_t reg_value;
+	enum cam_fd_hw_irq_type irq_type = CAM_FD_IRQ_FRAME_DONE;
+	uint32_t num_irqs = 0;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid data in IRQ callback");
+		return IRQ_NONE;
+	}
+
+	fd_core = (struct cam_fd_core *) fd_hw->core_info;
+	soc_info = &fd_hw->soc_info;
+	hw_static_info = fd_core->hw_static_info;
+
+	reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.irq_status);
+
+	CAM_DBG(CAM_FD, "FD IRQ status 0x%x", reg_value);
+
+	/* Acknowledge everything we just read so the line deasserts */
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.irq_clear,
+		reg_value);
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE)) {
+		complete_all(&fd_core->halt_complete);
+		irq_type = CAM_FD_IRQ_HALT_DONE;
+		num_irqs++;
+	}
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE)) {
+		complete_all(&fd_core->reset_complete);
+		irq_type = CAM_FD_IRQ_RESET_DONE;
+		num_irqs++;
+	}
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE)) {
+		complete_all(&fd_core->processing_complete);
+		irq_type = CAM_FD_IRQ_FRAME_DONE;
+		num_irqs++;
+	}
+
+	/*
+	 * We should never get an IRQ callback with no or more than one mask.
+	 * Validate first to make sure nothing going wrong.
+	 */
+	if (num_irqs != 1) {
+		CAM_ERR(CAM_FD,
+			"Invalid number of IRQs, value=0x%x, num_irqs=%d",
+			reg_value, num_irqs);
+		return IRQ_NONE;
+	}
+
+	trace_cam_irq_activated("FD", irq_type);
+
+	if (irq_type == CAM_FD_IRQ_HALT_DONE) {
+		/*
+		 * Do not send HALT IRQ callback to Hw Mgr,
+		 * a reset would always follow
+		 */
+		return IRQ_HANDLED;
+	}
+
+	/* Plain spin_lock is sufficient: we are already in IRQ context */
+	spin_lock(&fd_core->spin_lock);
+	/* Do not change state to IDLE on HALT IRQ. Reset must follow halt */
+	if ((irq_type == CAM_FD_IRQ_RESET_DONE) ||
+		(irq_type == CAM_FD_IRQ_FRAME_DONE)) {
+
+		fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+		if (irq_type == CAM_FD_IRQ_FRAME_DONE)
+			fd_core->results_valid = true;
+
+		CAM_DBG(CAM_FD, "FD IRQ type %d, state=%d",
+			irq_type, fd_core->core_state);
+	}
+	spin_unlock(&fd_core->spin_lock);
+
+	/* Callback registered via CAM_FD_HW_CMD_REGISTER_CALLBACK */
+	if (fd_core->irq_cb.cam_fd_hw_mgr_cb)
+		fd_core->irq_cb.cam_fd_hw_mgr_cb(fd_core->irq_cb.data,
+			irq_type);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * cam_fd_hw_get_hw_caps() - Copy out the cached FD HW capabilities.
+ * @hw_priv:         struct cam_hw_info for this FD device
+ * @get_hw_cap_args: struct cam_fd_hw_caps output, filled by struct copy
+ * @arg_size:        size of @get_hw_cap_args
+ *
+ * Returns 0 on success, -EINVAL on NULL inputs. Capabilities are the
+ * ones read from HW during probe and cached in cam_fd_core::hw_caps.
+ *
+ * NOTE(review): unlike the other entry points in this file, @arg_size
+ * is not validated against sizeof(struct cam_fd_hw_caps) — confirm
+ * whether that check was intentionally omitted.
+ */
+int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_caps *fd_hw_caps =
+		(struct cam_fd_hw_caps *)get_hw_cap_args;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK",
+			hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	*fd_hw_caps = fd_core->hw_caps;
+
+	CAM_DBG(CAM_FD, "core:%d.%d wrapper:%d.%d mode:%d, raw:%d",
+		fd_hw_caps->core_version.major,
+		fd_hw_caps->core_version.minor,
+		fd_hw_caps->wrapper_version.major,
+		fd_hw_caps->wrapper_version.minor,
+		fd_hw_caps->supported_modes,
+		fd_hw_caps->raw_results_available);
+
+	return 0;
+}
+
+/*
+ * cam_fd_hw_init() - Power up the FD HW (refcounted) and stream on CDM.
+ * @hw_priv:      struct cam_hw_info for this FD device
+ * @init_hw_args: struct cam_fd_hw_init_args; may carry a per-context
+ *                ctx_hw_private whose CDM handle is streamed on
+ * @arg_size:     must equal sizeof(struct cam_fd_hw_init_args)
+ *
+ * First caller enables SOC resources, resets the HW and applies the
+ * power-on settings; subsequent callers only bump open_count and do
+ * the CDM stream-on. Returns 0 on success, negative errno otherwise.
+ * On CDM stream-on failure the refcount is rolled back, and power is
+ * dropped again if this caller was the last reference.
+ */
+int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_init_args *init_args =
+		(struct cam_fd_hw_init_args *)init_hw_args;
+	int rc = 0;
+	unsigned long flags;
+
+	if (!fd_hw || !init_args) {
+		CAM_ERR(CAM_FD, "Invalid argument %pK %pK", fd_hw, init_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_init_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_init_args));
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	mutex_lock(&fd_hw->hw_mutex);
+	CAM_DBG(CAM_FD, "FD HW Init ref count before %d", fd_hw->open_count);
+
+	/* HW already powered by another context; skip power-up sequence */
+	if (fd_hw->open_count > 0) {
+		rc = 0;
+		goto cdm_streamon;
+	}
+
+	rc = cam_fd_soc_enable_resources(&fd_hw->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Enable SOC failed, rc=%d", rc);
+		goto unlock_return;
+	}
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	rc = cam_fd_hw_reset(hw_priv, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Reset Failed, rc=%d", rc);
+		goto disable_soc;
+	}
+
+	cam_fd_hw_util_enable_power_on_settings(fd_hw);
+
+cdm_streamon:
+	fd_hw->open_count++;
+	CAM_DBG(CAM_FD, "FD HW Init ref count after %d", fd_hw->open_count);
+
+	if (init_args->ctx_hw_private) {
+		struct cam_fd_ctx_hw_private *ctx_hw_private =
+			init_args->ctx_hw_private;
+
+		rc = cam_cdm_stream_on(ctx_hw_private->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_FD, "CDM StreamOn fail :handle=0x%x, rc=%d",
+				ctx_hw_private->cdm_handle, rc);
+			/* Roll back refcount; drop power if we were last */
+			fd_hw->open_count--;
+			if (!fd_hw->open_count)
+				goto disable_soc;
+		}
+	}
+
+	mutex_unlock(&fd_hw->hw_mutex);
+
+	return rc;
+
+disable_soc:
+	if (cam_fd_soc_disable_resources(&fd_hw->soc_info))
+		CAM_ERR(CAM_FD, "Error in disable soc resources");
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+unlock_return:
+	mutex_unlock(&fd_hw->hw_mutex);
+	return rc;
+}
+
+/*
+ * cam_fd_hw_deinit() - Drop one reference on the FD HW; power down on last.
+ * @hw_priv:        struct cam_hw_info for this FD device
+ * @deinit_hw_args: struct cam_fd_hw_deinit_args; may carry a per-context
+ *                  ctx_hw_private whose CDM handle is streamed off
+ * @arg_size:       must equal sizeof(struct cam_fd_hw_deinit_args)
+ *
+ * Mirror of cam_fd_hw_init(): SOC resources are disabled only when the
+ * last reference goes away. CDM stream-off always runs for the caller's
+ * context regardless of the remaining refcount. Returns 0 on success,
+ * -EFAULT on an unbalanced deinit, or the CDM/SOC error code.
+ */
+int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = hw_priv;
+	struct cam_fd_core *fd_core = NULL;
+	struct cam_fd_hw_deinit_args *deinit_args =
+		(struct cam_fd_hw_deinit_args *)deinit_hw_args;
+	int rc = 0;
+	unsigned long flags;
+
+	if (!fd_hw || !deinit_hw_args) {
+		CAM_ERR(CAM_FD, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_deinit_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_deinit_args));
+		return -EINVAL;
+	}
+
+	mutex_lock(&fd_hw->hw_mutex);
+	if (fd_hw->open_count == 0) {
+		mutex_unlock(&fd_hw->hw_mutex);
+		CAM_ERR(CAM_FD, "Error Unbalanced deinit");
+		return -EFAULT;
+	}
+
+	fd_hw->open_count--;
+	CAM_DBG(CAM_FD, "FD HW ref count=%d", fd_hw->open_count);
+
+	if (fd_hw->open_count > 0) {
+		rc = 0;
+		goto positive_ref_cnt;
+	}
+
+	rc = cam_fd_soc_disable_resources(&fd_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in Disable SOC, rc=%d", rc);
+
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	/* With the ref_cnt correct, this should never happen */
+	/*
+	 * NOTE(review): if fd_core were ever NULL the spin_lock below would
+	 * still dereference it; the WARN alone does not bail out. core_info
+	 * is assigned at probe time, so this is presumed unreachable.
+	 */
+	WARN_ON(!fd_core);
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+positive_ref_cnt:
+	if (deinit_args->ctx_hw_private) {
+		struct cam_fd_ctx_hw_private *ctx_hw_private =
+			deinit_args->ctx_hw_private;
+
+		rc = cam_cdm_stream_off(ctx_hw_private->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_FD,
+				"Failed in CDM StreamOff, handle=0x%x, rc=%d",
+				ctx_hw_private->cdm_handle, rc);
+		}
+	}
+
+	mutex_unlock(&fd_hw->hw_mutex);
+	return rc;
+}
+
+/*
+ * cam_fd_hw_reset() - Halt and synchronously reset the FD wrapper.
+ * @hw_priv:         struct cam_hw_info for this FD device
+ * @reset_core_args: unused
+ * @arg_size:        unused
+ *
+ * Rejected while powered down or while another reset is in progress.
+ * CGC is force-disabled around the halt+reset sequence and re-enabled
+ * afterwards. Returns 0 on success, negative errno otherwise.
+ *
+ * NOTE(review): if the halt or sync-reset step fails, the function
+ * returns with core_state still RESET_PROGRESS, which makes any later
+ * reset attempt fail the state check above — confirm this is the
+ * intended "dead HW" behavior.
+ */
+int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_static_info *hw_static_info;
+	struct cam_hw_soc_info *soc_info;
+	unsigned long flags;
+	int rc;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid input handle");
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	hw_static_info = fd_core->hw_static_info;
+	soc_info = &fd_hw->soc_info;
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
+		(fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
+		CAM_ERR(CAM_FD, "Reset not allowed in %d state",
+			fd_core->core_state);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	fd_core->results_valid = false;
+	fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	/* Keep clocks running (CGC off) for the whole halt+reset sequence */
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x1);
+
+	rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
+		return rc;
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x0);
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	return rc;
+}
+
+/*
+ * cam_fd_hw_start() - Kick off FD HW processing for one frame request.
+ * @hw_priv:       struct cam_hw_info for this FD device
+ * @hw_start_args: struct cam_fd_hw_cmd_start_args with the HW update
+ *                 entries (CDM BLs) and the request private data
+ * @arg_size:      must equal sizeof(struct cam_fd_hw_cmd_start_args)
+ *
+ * Moves the core from IDLE to PROCESSING, records the in-flight
+ * request, optionally narrows the IRQ mask to FRAME_DONE only
+ * (single-IRQ errata), packs the update entries into the context's
+ * preallocated CDM command and submits them. Returns 0 on success;
+ * on any failure after the state change, the core is returned to IDLE.
+ */
+int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_static_info *hw_static_info;
+	struct cam_fd_hw_cmd_start_args *start_args =
+		(struct cam_fd_hw_cmd_start_args *)hw_start_args;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	unsigned long flags;
+	int rc;
+
+	if (!hw_priv || !start_args) {
+		CAM_ERR(CAM_FD, "Invalid input args %pK %pK", hw_priv,
+			start_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_cmd_start_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_cmd_start_args));
+		return -EINVAL;
+	}
+
+	/*
+	 * The context's cdm_cmd was sized for CAM_FD_MAX_HW_ENTRIES at
+	 * reserve time; reject anything larger before touching HW state.
+	 */
+	if (start_args->num_hw_update_entries > CAM_FD_MAX_HW_ENTRIES) {
+		CAM_ERR(CAM_FD, "Invalid number of hw update entries %d",
+			start_args->num_hw_update_entries);
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	hw_static_info = fd_core->hw_static_info;
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if (fd_core->core_state != CAM_FD_CORE_STATE_IDLE) {
+		CAM_ERR(CAM_FD, "Cannot start in %d state",
+			fd_core->core_state);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	/*
+	 * We are about to start FD HW processing, save the request
+	 * private data which is being processed by HW. Once the frame
+	 * processing is finished, process_cmd(FRAME_DONE) should be called
+	 * with same hw_req_private as input.
+	 */
+	fd_core->hw_req_private = start_args->hw_req_private;
+	fd_core->core_state = CAM_FD_CORE_STATE_PROCESSING;
+	fd_core->results_valid = false;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	ctx_hw_private = start_args->ctx_hw_private;
+	if (!ctx_hw_private) {
+		CAM_ERR(CAM_FD, "Invalid ctx_hw_private");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* Before starting HW process, clear processing complete */
+	reinit_completion(&fd_core->processing_complete);
+
+	if (hw_static_info->enable_errata_wa.single_irq_only) {
+		cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE));
+	}
+
+	if (start_args->num_hw_update_entries > 0) {
+		struct cam_cdm_bl_request *cdm_cmd = ctx_hw_private->cdm_cmd;
+		struct cam_hw_update_entry *cmd;
+		int i;
+
+		cdm_cmd->cmd_arrary_count = start_args->num_hw_update_entries;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = false;
+		cdm_cmd->userdata = NULL;
+		cdm_cmd->cookie = 0;
+
+		/*
+		 * Copy exactly num_hw_update_entries entries. The previous
+		 * "<=" bound copied one extra entry, overrunning both the
+		 * caller's hw_update_entries and, at the maximum count,
+		 * the cdm_cmd->cmd[] array itself.
+		 */
+		for (i = 0; i < start_args->num_hw_update_entries; i++) {
+			cmd = (start_args->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		rc = cam_cdm_submit_bls(ctx_hw_private->cdm_handle, cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_FD,
+				"Failed to submit cdm commands, rc=%d", rc);
+			goto error;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid number of hw update entries");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	return 0;
+error:
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	return rc;
+}
+
+/*
+ * cam_fd_hw_halt_reset() - Stop entry point: halt the FD HW, then reset.
+ * @hw_priv:   struct cam_hw_info for this FD device
+ * @stop_args: unused
+ * @arg_size:  unused
+ *
+ * Wired up as hw_ops.stop. The sequence is identical to
+ * cam_fd_hw_reset(): reject while powered down or mid-reset, force CGC
+ * off, halt, sync-reset, re-enable CGC, and return the core to IDLE.
+ *
+ * NOTE(review): as with cam_fd_hw_reset(), a halt/reset failure leaves
+ * core_state at RESET_PROGRESS — confirm intended.
+ */
+int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_static_info *hw_static_info;
+	struct cam_hw_soc_info *soc_info;
+	unsigned long flags;
+	int rc;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid input handle");
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	hw_static_info = fd_core->hw_static_info;
+	soc_info = &fd_hw->soc_info;
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
+		(fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
+		CAM_ERR(CAM_FD, "Reset not allowed in %d state",
+			fd_core->core_state);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	fd_core->results_valid = false;
+	fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x1);
+
+	rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
+		return rc;
+	}
+
+	/* HALT must be followed by RESET */
+	rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
+		return rc;
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x0);
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	return rc;
+}
+
+/*
+ * cam_fd_hw_reserve() - Reserve the FD HW for one context.
+ * @hw_priv:         struct cam_hw_info for this FD device
+ * @hw_reserve_args: struct cam_fd_hw_reserve_args (in: hw_ctx, mode;
+ *                   out: ctx_hw_private)
+ * @arg_size:        must equal sizeof(struct cam_fd_hw_reserve_args)
+ *
+ * Allocates the per-context private data and a CDM BL request sized
+ * for CAM_FD_MAX_HW_ENTRIES (struct already contains one cmd entry,
+ * hence the "- 1"), acquires a virtual CDM on this device's register
+ * map, and hands ownership of both allocations back to the caller via
+ * reserve_args->ctx_hw_private; cam_fd_hw_release() frees them.
+ * Returns 0 on success, -ENOMEM/-EINVAL or the CDM error otherwise.
+ */
+int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	struct cam_fd_hw_reserve_args *reserve_args =
+		(struct cam_fd_hw_reserve_args *)hw_reserve_args;
+	struct cam_cdm_acquire_data cdm_acquire;
+	struct cam_cdm_bl_request *cdm_cmd;
+	int i;
+
+	if (!fd_hw || !reserve_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, reserve_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_reserve_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_reserve_args));
+		return -EINVAL;
+	}
+
+	cdm_cmd = kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			((CAM_FD_MAX_HW_ENTRIES - 1) *
+			sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!cdm_cmd)
+		return -ENOMEM;
+
+	ctx_hw_private = kzalloc(sizeof(struct cam_fd_ctx_hw_private),
+		GFP_KERNEL);
+	if (!ctx_hw_private) {
+		kfree(cdm_cmd);
+		return -ENOMEM;
+	}
+
+	memset(&cdm_acquire, 0, sizeof(cdm_acquire));
+	strlcpy(cdm_acquire.identifier, "fd", sizeof("fd"));
+	cdm_acquire.cell_index = fd_hw->soc_info.index;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = ctx_hw_private;
+	cdm_acquire.cam_cdm_callback = cam_fd_hw_util_cdm_callback;
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.base_array_cnt = fd_hw->soc_info.num_reg_map;
+	for (i = 0; i < fd_hw->soc_info.num_reg_map; i++)
+		cdm_acquire.base_array[i] = &fd_hw->soc_info.reg_map[i];
+
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to acquire the CDM HW");
+		goto error;
+	}
+
+	ctx_hw_private->hw_ctx = reserve_args->hw_ctx;
+	ctx_hw_private->fd_hw = fd_hw;
+	ctx_hw_private->mode = reserve_args->mode;
+	ctx_hw_private->cdm_handle = cdm_acquire.handle;
+	ctx_hw_private->cdm_ops = cdm_acquire.ops;
+	ctx_hw_private->cdm_cmd = cdm_cmd;
+
+	reserve_args->ctx_hw_private = ctx_hw_private;
+
+	CAM_DBG(CAM_FD, "private=%pK, hw_ctx=%pK, mode=%d, cdm_handle=0x%x",
+		ctx_hw_private, ctx_hw_private->hw_ctx, ctx_hw_private->mode,
+		ctx_hw_private->cdm_handle);
+
+	return 0;
+error:
+	kfree(ctx_hw_private);
+	kfree(cdm_cmd);
+	return rc;
+}
+
+/*
+ * cam_fd_hw_release() - Release a context's FD HW reservation.
+ * @hw_priv:         struct cam_hw_info for this FD device
+ * @hw_release_args: struct cam_fd_hw_release_args carrying the
+ *                   ctx_hw_private returned by cam_fd_hw_reserve()
+ * @arg_size:        must equal sizeof(struct cam_fd_hw_release_args)
+ *
+ * Releases the CDM handle and frees the per-context allocations made
+ * in cam_fd_hw_reserve(), then clears the caller's ctx_hw_private.
+ * Returns 0 on success (a CDM release failure is logged but the
+ * memory is still reclaimed), -EINVAL on bad arguments.
+ */
+int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	struct cam_fd_hw_release_args *release_args =
+		(struct cam_fd_hw_release_args *)hw_release_args;
+
+	if (!fd_hw || !release_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, release_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_release_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_release_args));
+		return -EINVAL;
+	}
+
+	ctx_hw_private =
+		(struct cam_fd_ctx_hw_private *)release_args->ctx_hw_private;
+
+	/*
+	 * Guard against a double release or a release without a prior
+	 * successful reserve; the previous code dereferenced this
+	 * pointer unconditionally.
+	 */
+	if (!ctx_hw_private) {
+		CAM_ERR(CAM_FD, "Invalid release args, ctx_hw_private is NULL");
+		return -EINVAL;
+	}
+
+	rc = cam_cdm_release(ctx_hw_private->cdm_handle);
+	if (rc)
+		CAM_ERR(CAM_FD, "Release cdm handle failed, handle=0x%x, rc=%d",
+			ctx_hw_private->cdm_handle, rc);
+
+	kfree(ctx_hw_private->cdm_cmd);
+	kfree(ctx_hw_private);
+	release_args->ctx_hw_private = NULL;
+
+	return 0;
+}
+
+/*
+ * cam_fd_hw_process_cmd() - Dispatch a HW-manager command to the FD HW.
+ * @hw_priv:  struct cam_hw_info for this FD device
+ * @cmd_type: one of enum cam_fd_hw_cmd_type (< CAM_FD_HW_CMD_MAX)
+ * @cmd_args: command payload; type depends on @cmd_type
+ * @arg_size: size of @cmd_args, validated per command
+ *
+ * Supported commands: REGISTER_CALLBACK (stores the HW-mgr IRQ
+ * callback used by cam_fd_hw_irq), PRESTART and FRAME_DONE (forwarded
+ * to the respective util handlers). Returns 0 or the handler's result;
+ * -EINVAL for unknown commands or payload size mismatches.
+ */
+int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+
+	if (!hw_priv || !cmd_args ||
+		(cmd_type >= CAM_FD_HW_CMD_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK %d", hw_priv,
+			cmd_args, cmd_type);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_FD_HW_CMD_REGISTER_CALLBACK: {
+		struct cam_fd_hw_cmd_set_irq_cb *irq_cb_args;
+		struct cam_fd_core *fd_core =
+			(struct cam_fd_core *)fd_hw->core_info;
+
+		if (sizeof(struct cam_fd_hw_cmd_set_irq_cb) != arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		irq_cb_args = (struct cam_fd_hw_cmd_set_irq_cb *)cmd_args;
+		fd_core->irq_cb.cam_fd_hw_mgr_cb =
+			irq_cb_args->cam_fd_hw_mgr_cb;
+		fd_core->irq_cb.data = irq_cb_args->data;
+		rc = 0;
+		break;
+	}
+	case CAM_FD_HW_CMD_PRESTART: {
+		struct cam_fd_hw_cmd_prestart_args *prestart_args;
+
+		if (sizeof(struct cam_fd_hw_cmd_prestart_args) != arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		prestart_args = (struct cam_fd_hw_cmd_prestart_args *)cmd_args;
+		rc = cam_fd_hw_util_processcmd_prestart(fd_hw, prestart_args);
+		break;
+	}
+	case CAM_FD_HW_CMD_FRAME_DONE: {
+		struct cam_fd_hw_frame_done_args *cmd_frame_results;
+
+		if (sizeof(struct cam_fd_hw_frame_done_args) !=
+			arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_frame_results =
+			(struct cam_fd_hw_frame_done_args *)cmd_args;
+		rc = cam_fd_hw_util_processcmd_frame_done(fd_hw,
+			cmd_frame_results);
+		break;
+	}
+	default:
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h
new file mode 100644
index 000000000000..c1f39bb6cfa2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h
@@ -0,0 +1,244 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_CORE_H_
+#define _CAM_FD_HW_CORE_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_defs.h>
+#include <media/cam_fd.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_soc.h"
+
+#define CAM_FD_IRQ_TO_MASK(irq) (1 << (irq))
+#define CAM_FD_MASK_TO_IRQ(mask, irq) ((mask) >> (irq))
+
+#define CAM_FD_HW_HALT_RESET_TIMEOUT 750
+
+/**
+ * enum cam_fd_core_state - FD Core internal states
+ *
+ * @CAM_FD_CORE_STATE_POWERDOWN : Indicates FD core is powered down
+ * @CAM_FD_CORE_STATE_IDLE : Indicates FD HW is in idle state.
+ * Core can be in this state when it is
+ * ready to process frames or when
+ * processing is finished and results are
+ * available
+ * @CAM_FD_CORE_STATE_PROCESSING : Indicates FD core is processing frame
+ * @CAM_FD_CORE_STATE_READING_RESULTS : Indicates results are being read from
+ * FD core
+ * @CAM_FD_CORE_STATE_RESET_PROGRESS : Indicates FD Core is in reset state
+ */
+enum cam_fd_core_state {
+ CAM_FD_CORE_STATE_POWERDOWN,
+ CAM_FD_CORE_STATE_IDLE,
+ CAM_FD_CORE_STATE_PROCESSING,
+ CAM_FD_CORE_STATE_READING_RESULTS,
+ CAM_FD_CORE_STATE_RESET_PROGRESS,
+};
+
+/**
+ * struct cam_fd_ctx_hw_private : HW private information for a specific hw ctx.
+ * This information is populated by HW layer on
+ * reserve() and given back to HW Mgr as private
+ * data for the hw context. This private_data
+ * has to be passed by HW Mgr layer while
+ * further HW layer calls
+ *
+ * @hw_ctx : Corresponding hw_ctx pointer
+ * @fd_hw : FD HW info pointer
+ * @cdm_handle : CDM Handle for this context
+ * @cdm_ops : CDM Ops
+ * @cdm_cmd : CDM command pointer
+ * @mode : Mode this context is running
+ * @curr_req_private : Current Request information
+ *
+ */
+struct cam_fd_ctx_hw_private {
+ void *hw_ctx;
+ struct cam_hw_info *fd_hw;
+ uint32_t cdm_handle;
+ struct cam_cdm_utils_ops *cdm_ops;
+ struct cam_cdm_bl_request *cdm_cmd;
+ enum cam_fd_hw_mode mode;
+ struct cam_fd_hw_req_private *curr_req_private;
+};
+
+/**
+ * struct cam_fd_core_regs : FD HW Core register offsets info
+ *
+ * @version : Offset of version register
+ * @control : Offset of control register
+ * @result_cnt : Offset of result count register
+ * @result_addr : Offset of results address register
+ * @image_addr : Offset of image address register
+ * @work_addr : Offset of work address register
+ * @ro_mode : Offset of ro_mode register
+ * @results_reg_base : Offset of results_reg_base register
+ * @raw_results_reg_base : Offset of raw_results_reg_base register
+ *
+ */
+struct cam_fd_core_regs {
+ uint32_t version;
+ uint32_t control;
+ uint32_t result_cnt;
+ uint32_t result_addr;
+ uint32_t image_addr;
+ uint32_t work_addr;
+ uint32_t ro_mode;
+ uint32_t results_reg_base;
+ uint32_t raw_results_reg_base;
+};
+
+/**
+ * struct cam_fd_wrapper_regs : FD HW Wrapper register offsets info
+ *
+ * @wrapper_version     : Offset of wrapper_version register
+ * @cgc_disable         : Offset of cgc_disable register
+ * @hw_stop             : Offset of hw_stop register
+ * @sw_reset            : Offset of sw_reset register
+ * @vbif_req_priority   : Offset of vbif_req_priority register
+ * @vbif_priority_level : Offset of vbif_priority_level register
+ * @vbif_done_status    : Offset of vbif_done_status register
+ * @irq_mask            : Offset of irq mask register
+ * @irq_status          : Offset of irq status register
+ * @irq_clear           : Offset of irq clear register
+ *
+ */
+struct cam_fd_wrapper_regs {
+	uint32_t wrapper_version;
+	uint32_t cgc_disable;
+	uint32_t hw_stop;
+	uint32_t sw_reset;
+	uint32_t vbif_req_priority;
+	uint32_t vbif_priority_level;
+	uint32_t vbif_done_status;
+	uint32_t irq_mask;
+	uint32_t irq_status;
+	uint32_t irq_clear;
+};
+
+/**
+ * struct cam_fd_hw_errata_wa : FD HW Errata workaround enable/disable info
+ *
+ * @single_irq_only : Whether to enable only one irq at any time
+ * @ro_mode_enable_always : Whether to enable ro mode always
+ * @ro_mode_results_invalid : Whether results written directly into output
+ * memory by HW are valid or not
+ */
+struct cam_fd_hw_errata_wa {
+ bool single_irq_only;
+ bool ro_mode_enable_always;
+ bool ro_mode_results_invalid;
+};
+
+/**
+ * struct cam_fd_hw_results_prop : FD HW Results properties
+ *
+ * @max_faces : Maximum number of faces supported
+ * @per_face_entries : Number of register with properties for each face
+ * @raw_results_entries : Number of raw results entries for the full search
+ * @raw_results_available : Whether raw results available on this HW
+ *
+ */
+struct cam_fd_hw_results_prop {
+ uint32_t max_faces;
+ uint32_t per_face_entries;
+ uint32_t raw_results_entries;
+ bool raw_results_available;
+};
+
+/**
+ * struct cam_fd_hw_static_info : FD HW information based on HW version
+ *
+ * @core_version : Core version of FD HW
+ * @wrapper_version : Wrapper version of FD HW
+ * @core_regs : Register offset information for core registers
+ * @wrapper_regs : Register offset information for wrapper registers
+ * @results : Information about results available on this HW
+ * @enable_errata_wa : Errata workaround information
+ * @irq_mask : IRQ mask to enable
+ * @qos_priority : QoS priority setting for this chipset
+ * @qos_priority_level : QoS priority level setting for this chipset
+ * @supported_modes : Supported HW modes on this HW version
+ * @ro_mode_supported : Whether RO mode is supported on this HW
+ *
+ */
+struct cam_fd_hw_static_info {
+ struct cam_hw_version core_version;
+ struct cam_hw_version wrapper_version;
+ struct cam_fd_core_regs core_regs;
+ struct cam_fd_wrapper_regs wrapper_regs;
+ struct cam_fd_hw_results_prop results;
+ struct cam_fd_hw_errata_wa enable_errata_wa;
+ uint32_t irq_mask;
+ uint32_t qos_priority;
+ uint32_t qos_priority_level;
+ uint32_t supported_modes;
+ bool ro_mode_supported;
+};
+
+/**
+ * struct cam_fd_core : FD HW core data structure
+ *
+ * @hw_static_info : HW information specific to version
+ * @hw_caps : HW capabilities
+ * @core_state : Current HW state
+ * @processing_complete : Whether processing is complete
+ * @reset_complete : Whether reset is complete
+ * @halt_complete : Whether halt is complete
+ * @hw_req_private : Request that is being currently processed by HW
+ * @results_valid : Whether HW frame results are available to get
+ * @spin_lock : Mutex to protect shared data in hw layer
+ * @irq_cb : HW Manager callback information
+ *
+ */
+struct cam_fd_core {
+ struct cam_fd_hw_static_info *hw_static_info;
+ struct cam_fd_hw_caps hw_caps;
+ enum cam_fd_core_state core_state;
+ struct completion processing_complete;
+ struct completion reset_complete;
+ struct completion halt_complete;
+ struct cam_fd_hw_req_private *hw_req_private;
+ bool results_valid;
+ spinlock_t spin_lock;
+ struct cam_fd_hw_cmd_set_irq_cb irq_cb;
+};
+
+int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
+ struct cam_fd_hw_caps *hw_caps);
+irqreturn_t cam_fd_hw_irq(int irq_num, void *data);
+
+int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+ uint32_t arg_size);
+int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size);
+int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size);
+int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size);
+int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size);
+int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size);
+int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size);
+int cam_fd_hw_read(void *hw_priv, void *read_args, uint32_t arg_size);
+int cam_fd_hw_write(void *hw_priv, void *write_args, uint32_t arg_size);
+int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size);
+
+#endif /* _CAM_FD_HW_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c
new file mode 100644
index 000000000000..c7ef37c65659
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c
@@ -0,0 +1,237 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_fd_hw_v41.h"
+#include "cam_fd_hw_v501.h"
+
+static char fd_dev_name[8];
+
+/*
+ * cam_fd_hw_dev_probe() - Platform probe for one FD HW instance.
+ *
+ * Allocates the hw_intf/hw_info/core triplet, wires up the hw_ops
+ * vtable, picks the version-specific static info from the OF match
+ * data, initializes SOC resources (including the IRQ, routed to
+ * cam_fd_hw_irq), then does a one-shot init/read-caps/deinit cycle to
+ * cache the HW capabilities before leaving the HW powered down.
+ */
+static int cam_fd_hw_dev_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *fd_hw;
+	struct cam_hw_intf *fd_hw_intf;
+	struct cam_fd_core *fd_core;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_fd_hw_static_info *hw_static_info = NULL;
+	int rc = 0;
+	/*
+	 * Default the index to 0: of_property_read_u32() leaves the
+	 * output untouched on failure, and the previous code read the
+	 * then-uninitialized value when "cell-index" was absent.
+	 */
+	uint32_t hw_idx = 0;
+	struct cam_fd_hw_init_args init_args;
+	struct cam_fd_hw_deinit_args deinit_args;
+
+	fd_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!fd_hw_intf)
+		return -ENOMEM;
+
+	fd_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!fd_hw) {
+		kfree(fd_hw_intf);
+		return -ENOMEM;
+	}
+
+	fd_core = kzalloc(sizeof(struct cam_fd_core), GFP_KERNEL);
+	if (!fd_core) {
+		kfree(fd_hw);
+		kfree(fd_hw_intf);
+		return -ENOMEM;
+	}
+	/* Best effort: missing "cell-index" falls back to index 0 */
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &hw_idx);
+
+	fd_hw_intf->hw_priv = fd_hw;
+	fd_hw->core_info = fd_core;
+	fd_hw_intf->hw_idx = hw_idx;
+
+	memset(fd_dev_name, 0, sizeof(fd_dev_name));
+	snprintf(fd_dev_name, sizeof(fd_dev_name),
+		"fd%1u", fd_hw_intf->hw_idx);
+
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_hw->soc_info.pdev = pdev;
+	fd_hw->soc_info.dev = &pdev->dev;
+	fd_hw->soc_info.dev_name = fd_dev_name;
+	fd_hw->open_count = 0;
+	mutex_init(&fd_hw->hw_mutex);
+	spin_lock_init(&fd_hw->hw_lock);
+	init_completion(&fd_hw->hw_complete);
+
+	spin_lock_init(&fd_core->spin_lock);
+	init_completion(&fd_core->processing_complete);
+	init_completion(&fd_core->halt_complete);
+	init_completion(&fd_core->reset_complete);
+
+	fd_hw_intf->hw_ops.get_hw_caps = cam_fd_hw_get_hw_caps;
+	fd_hw_intf->hw_ops.init = cam_fd_hw_init;
+	fd_hw_intf->hw_ops.deinit = cam_fd_hw_deinit;
+	fd_hw_intf->hw_ops.reset = cam_fd_hw_reset;
+	fd_hw_intf->hw_ops.reserve = cam_fd_hw_reserve;
+	fd_hw_intf->hw_ops.release = cam_fd_hw_release;
+	fd_hw_intf->hw_ops.start = cam_fd_hw_start;
+	fd_hw_intf->hw_ops.stop = cam_fd_hw_halt_reset;
+	fd_hw_intf->hw_ops.read = NULL;
+	fd_hw_intf->hw_ops.write = NULL;
+	fd_hw_intf->hw_ops.process_cmd = cam_fd_hw_process_cmd;
+	fd_hw_intf->hw_type = CAM_HW_FD;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev || !match_dev->data) {
+		CAM_ERR(CAM_FD, "No Of_match data, %pK", match_dev);
+		rc = -EINVAL;
+		goto free_memory;
+	}
+	hw_static_info = (struct cam_fd_hw_static_info *)match_dev->data;
+	fd_core->hw_static_info = hw_static_info;
+
+	CAM_DBG(CAM_FD, "HW Static Info : version core[%d.%d] wrapper[%d.%d]",
+		hw_static_info->core_version.major,
+		hw_static_info->core_version.minor,
+		hw_static_info->wrapper_version.major,
+		hw_static_info->wrapper_version.minor);
+
+	rc = cam_fd_soc_init_resources(&fd_hw->soc_info, cam_fd_hw_irq, fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to init soc, rc=%d", rc);
+		goto free_memory;
+	}
+
+	/* Power-cycle once to read and cache the HW capabilities */
+	memset(&init_args, 0x0, sizeof(init_args));
+	memset(&deinit_args, 0x0, sizeof(deinit_args));
+	rc = cam_fd_hw_init(fd_hw, &init_args, sizeof(init_args));
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to hw init, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	rc = cam_fd_hw_util_get_hw_caps(fd_hw, &fd_core->hw_caps);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to get hw caps, rc=%d", rc);
+		goto deinit_hw;
+	}
+
+	rc = cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args));
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to deinit hw, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	platform_set_drvdata(pdev, fd_hw_intf);
+	CAM_DBG(CAM_FD, "FD-%d probe successful", fd_hw_intf->hw_idx);
+
+	return rc;
+
+deinit_hw:
+	if (cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args)))
+		CAM_ERR(CAM_FD, "Failed in hw deinit");
+deinit_platform_res:
+	if (cam_fd_soc_deinit_resources(&fd_hw->soc_info))
+		CAM_ERR(CAM_FD, "Failed in soc deinit");
+	mutex_destroy(&fd_hw->hw_mutex);
+free_memory:
+	kfree(fd_hw);
+	kfree(fd_hw_intf);
+	kfree(fd_core);
+
+	return rc;
+}
+
+/*
+ * cam_fd_hw_dev_remove() - Platform remove; tear down one FD HW instance.
+ *
+ * Unwinds probe in reverse: frees the core, deinitializes SOC
+ * resources, destroys the mutex and frees hw_info and hw_intf.
+ * Partial teardown is tolerated if intermediate pointers are missing.
+ */
+static int cam_fd_hw_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct cam_hw_intf *fd_hw_intf;
+	struct cam_hw_info *fd_hw;
+	struct cam_fd_core *fd_core;
+
+	fd_hw_intf = platform_get_drvdata(pdev);
+	if (!fd_hw_intf) {
+		CAM_ERR(CAM_FD, "Invalid fd_hw_intf from pdev");
+		return -EINVAL;
+	}
+
+	fd_hw = fd_hw_intf->hw_priv;
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid fd_hw from fd_hw_intf");
+		rc = -ENODEV;
+		goto free_fd_hw_intf;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	if (!fd_core) {
+		CAM_ERR(CAM_FD, "Invalid fd_core from fd_hw");
+		rc = -EINVAL;
+		goto deinit_platform_res;
+	}
+
+	kfree(fd_core);
+
+	/* Falls through: SOC/mutex/hw_info teardown runs in both cases */
+deinit_platform_res:
+	rc = cam_fd_soc_deinit_resources(&fd_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "Error in FD soc deinit, rc=%d", rc);
+
+	mutex_destroy(&fd_hw->hw_mutex);
+	kfree(fd_hw);
+
+free_fd_hw_intf:
+	kfree(fd_hw_intf);
+
+	return rc;
+}
+
+/*
+ * DT match table. The .data pointer carries the version-specific
+ * static info (register offsets, errata, result properties) that
+ * probe stores in cam_fd_core::hw_static_info.
+ */
+static const struct of_device_id cam_fd_hw_dt_match[] = {
+	{
+		.compatible = "qcom,fd41",
+		.data = &cam_fd_wrapper120_core410_info,
+	},
+	{
+		.compatible = "qcom,fd501",
+		.data = &cam_fd_wrapper200_core501_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_fd_hw_dt_match);
+
+static struct platform_driver cam_fd_hw_driver = {
+	.probe = cam_fd_hw_dev_probe,
+	.remove = cam_fd_hw_dev_remove,
+	.driver = {
+		.name = "cam_fd_hw",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_fd_hw_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+/* Module entry/exit: register/unregister the platform driver */
+static int __init cam_fd_hw_init_module(void)
+{
+	return platform_driver_register(&cam_fd_hw_driver);
+}
+
+static void __exit cam_fd_hw_exit_module(void)
+{
+	platform_driver_unregister(&cam_fd_hw_driver);
+}
+
+module_init(cam_fd_hw_init_module);
+module_exit(cam_fd_hw_exit_module);
+MODULE_DESCRIPTION("CAM FD HW driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
new file mode 100644
index 000000000000..ef3b6c9314e2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
@@ -0,0 +1,289 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_INTF_H_
+#define _CAM_FD_HW_INTF_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_fd.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_subdev.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+
+#define CAM_FD_MAX_IO_BUFFERS 5
+#define CAM_FD_MAX_HW_ENTRIES 5
+
+/**
+ * enum cam_fd_hw_type - Enum for FD HW type
+ *
+ * @CAM_HW_FD : FaceDetection HW type
+ */
+enum cam_fd_hw_type {
+ CAM_HW_FD,
+};
+
+/**
+ * enum cam_fd_hw_mode - Mode in which HW can run
+ *
+ * @CAM_FD_MODE_FACEDETECTION : Face Detection mode in which face search
+ * is done on the given frame
+ * @CAM_FD_MODE_PYRAMID : Pyramid mode where a pyramid image is generated
+ * from an input image
+ */
+enum cam_fd_hw_mode {
+ CAM_FD_MODE_FACEDETECTION = 0x1,
+ CAM_FD_MODE_PYRAMID = 0x2,
+};
+
+/**
+ * enum cam_fd_priority - FD priority levels
+ *
+ * @CAM_FD_PRIORITY_HIGH : Indicates high priority client, driver prioritizes
+ * frame requests coming from contexts with HIGH
+ * priority compared to context with normal priority
+ * @CAM_FD_PRIORITY_NORMAL : Indicates normal priority client
+ */
+enum cam_fd_priority {
+ CAM_FD_PRIORITY_HIGH = 0x0,
+ CAM_FD_PRIORITY_NORMAL,
+};
+
+/**
+ * enum cam_fd_hw_irq_type - FD HW IRQ types
+ *
+ * @CAM_FD_IRQ_FRAME_DONE : Indicates frame processing is finished
+ * @CAM_FD_IRQ_HALT_DONE : Indicates HW halt is finished
+ * @CAM_FD_IRQ_RESET_DONE : Indicates HW reset is finished
+ */
+enum cam_fd_hw_irq_type {
+ CAM_FD_IRQ_FRAME_DONE,
+ CAM_FD_IRQ_HALT_DONE,
+ CAM_FD_IRQ_RESET_DONE,
+};
+
+/**
+ * enum cam_fd_hw_cmd_type - FD HW layer custom commands
+ *
+ * @CAM_FD_HW_CMD_PRESTART : Command to process pre-start settings
+ * @CAM_FD_HW_CMD_FRAME_DONE : Command to process frame done settings
+ * @CAM_FD_HW_CMD_UPDATE_SOC : Command to process soc update
+ * @CAM_FD_HW_CMD_REGISTER_CALLBACK : Command to set hw mgr callback
+ * @CAM_FD_HW_CMD_MAX : Indicates max cmd
+ */
+enum cam_fd_hw_cmd_type {
+ CAM_FD_HW_CMD_PRESTART,
+ CAM_FD_HW_CMD_FRAME_DONE,
+ CAM_FD_HW_CMD_UPDATE_SOC,
+ CAM_FD_HW_CMD_REGISTER_CALLBACK,
+ CAM_FD_HW_CMD_MAX,
+};
+
+/**
+ * struct cam_fd_hw_io_buffer : FD HW IO Buffer information
+ *
+ * @valid : Whether this IO Buf configuration is valid
+ * @io_cfg : IO Configuration information
+ * @num_buf : Number planes in io_addr, cpu_addr array
+ * @io_addr : Array of IO address information for planes
+ * @cpu_addr : Array of CPU address information for planes
+ */
+struct cam_fd_hw_io_buffer {
+ bool valid;
+ struct cam_buf_io_cfg *io_cfg;
+ uint32_t num_buf;
+ uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+ uintptr_t cpu_addr[CAM_PACKET_MAX_PLANES];
+};
+
+/**
+ * struct cam_fd_hw_req_private : FD HW layer's private information
+ * specific to a request
+ *
+ * @ctx_hw_private : FD HW layer's ctx specific private data
+ * @request_id : Request ID corresponding to this private information
+ * @get_raw_results : Whether to get raw results for this request
+ * @ro_mode_enabled : Whether RO mode is enabled for this request
+ * @fd_results : Pointer to save face detection results
+ * @raw_results : Pointer to save face detection raw results
+ */
+struct cam_fd_hw_req_private {
+ void *ctx_hw_private;
+ uint64_t request_id;
+ bool get_raw_results;
+ bool ro_mode_enabled;
+ struct cam_fd_results *fd_results;
+ uint32_t *raw_results;
+};
+
+/**
+ * struct cam_fd_hw_reserve_args : Reserve args for this HW context
+ *
+ * @hw_ctx : HW context for which reserve is requested
+ * @mode : Mode for which this reserve is requested
+ * @ctx_hw_private : Pointer to save HW layer's private information specific
+ * to this hw context. This has to be passed while calling
+ * further HW layer calls
+ */
+struct cam_fd_hw_reserve_args {
+ void *hw_ctx;
+ enum cam_fd_hw_mode mode;
+ void *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_release_args : Release args for this HW context
+ *
+ * @hw_ctx : HW context for which release is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_release_args {
+ void *hw_ctx;
+ void *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_init_args : Init args for this HW context
+ *
+ * @hw_ctx : HW context for which init is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_init_args {
+ void *hw_ctx;
+ void *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_deinit_args : Deinit args for this HW context
+ *
+ * @hw_ctx : HW context for which deinit is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_deinit_args {
+ void *hw_ctx;
+ void *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_prestart_args : Prestart command args
+ *
+ * @hw_ctx : HW context which submitted this prestart
+ * @ctx_hw_private : HW layer's private information specific to
+ * this hw context
+ * @request_id : Request ID corresponds to this pre-start command
+ * @get_raw_results : Whether to get raw results for this request
+ * @input_buf : Input IO Buffer information for this request
+ * @output_buf : Output IO Buffer information for this request
+ * @cmd_buf_addr : Command buffer address to fill kmd commands
+ * @size : Size available in command buffer
+ * @pre_config_buf_size : Buffer size filled with commands by KMD that has
+ * to be inserted before umd commands
+ * @post_config_buf_size : Buffer size filled with commands by KMD that has
+ * to be inserted after umd commands
+ * @hw_req_private : HW layer's private information specific to
+ * this request
+ */
+struct cam_fd_hw_cmd_prestart_args {
+ void *hw_ctx;
+ void *ctx_hw_private;
+ uint64_t request_id;
+ bool get_raw_results;
+ struct cam_fd_hw_io_buffer input_buf[CAM_FD_MAX_IO_BUFFERS];
+ struct cam_fd_hw_io_buffer output_buf[CAM_FD_MAX_IO_BUFFERS];
+ uint32_t *cmd_buf_addr;
+ uint32_t size;
+ uint32_t pre_config_buf_size;
+ uint32_t post_config_buf_size;
+ struct cam_fd_hw_req_private hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_start_args : Start command args
+ *
+ * @hw_ctx : HW context which submitting start command
+ * @ctx_hw_private : HW layer's private information specific to
+ * this hw context
+ * @hw_req_private : HW layer's private information specific to
+ * this request
+ * @hw_update_entries : HW update entries corresponds to this request
+ * @num_hw_update_entries : Number of hw update entries
+ */
+struct cam_fd_hw_cmd_start_args {
+ void *hw_ctx;
+ void *ctx_hw_private;
+ struct cam_fd_hw_req_private *hw_req_private;
+ struct cam_hw_update_entry *hw_update_entries;
+ uint32_t num_hw_update_entries;
+};
+
+/**
+ * struct cam_fd_hw_stop_args : Stop command args
+ *
+ * @hw_ctx : HW context which submitting stop command
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ * @request_id : Request ID that need to be stopped
+ * @hw_req_private : HW layer's private information specific to this request
+ */
+struct cam_fd_hw_stop_args {
+ void *hw_ctx;
+ void *ctx_hw_private;
+ uint64_t request_id;
+ struct cam_fd_hw_req_private *hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_frame_done_args : Frame done command args
+ *
+ * @hw_ctx : HW context which submitting frame done request
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ * @request_id : Request ID that need to be stopped
+ * @hw_req_private : HW layer's private information specific to this request
+ */
+struct cam_fd_hw_frame_done_args {
+ void *hw_ctx;
+ void *ctx_hw_private;
+ uint64_t request_id;
+ struct cam_fd_hw_req_private *hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_reset_args : Reset command args
+ *
+ * @hw_ctx : HW context which submitting reset command
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_reset_args {
+ void *hw_ctx;
+ void *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_set_irq_cb : Set IRQ callback command args
+ *
+ * @cam_fd_hw_mgr_cb : HW Mgr's callback pointer
+ * @data : HW Mgr's private data
+ */
+struct cam_fd_hw_cmd_set_irq_cb {
+ int (*cam_fd_hw_mgr_cb)(void *data, enum cam_fd_hw_irq_type irq_type);
+ void *data;
+};
+
+#endif /* _CAM_FD_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
new file mode 100644
index 000000000000..adfd4d839a59
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
@@ -0,0 +1,290 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+
+/*
+ * CPAS IRQ callback registered for the FD device in
+ * cam_fd_soc_init_resources(). Only logs the event; always returns
+ * false (presumably indicating the IRQ is not consumed here —
+ * NOTE(review): confirm against the cam_cpas_register_client() contract).
+ */
+static bool cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
+{
+ if (!irq_data)
+ return false;
+
+ CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, irq_type=%d",
+ handle, userdata, irq_data->irq_type);
+
+ return false;
+}
+
+/*
+ * Build the mapping from FD register-base enums (CAM_FD_REG_CORE,
+ * CAM_FD_REG_WRAPPER) to indices into soc_info->reg_map[], by looking up
+ * the "fd_core" and "fd_wrapper" entries in the DT-provided mem block
+ * names. Both blocks are mandatory; returns -EINVAL if either is missing
+ * or if the mem block count is inconsistent.
+ */
+static int cam_fd_hw_soc_util_setup_regbase_indices(
+ struct cam_hw_soc_info *soc_info)
+{
+ struct cam_fd_soc_private *soc_private =
+ (struct cam_fd_soc_private *)soc_info->soc_private;
+ /*
+ * Initialize index: cam_common_util_get_string_index() may not write
+ * it on failure, and it is printed in the error logs below — reading
+ * it uninitialized would be undefined behavior.
+ */
+ uint32_t index = 0;
+ int rc, i;
+
+ /* Start with all bases unmapped. */
+ for (i = 0; i < CAM_FD_REG_MAX; i++)
+ soc_private->regbase_index[i] = -1;
+
+ if ((soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) ||
+ (soc_info->num_mem_block != CAM_FD_REG_MAX)) {
+ CAM_ERR(CAM_FD, "Invalid num_mem_block=%d",
+ soc_info->num_mem_block);
+ return -EINVAL;
+ }
+
+ rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+ soc_info->num_mem_block, "fd_core", &index);
+ if ((rc == 0) && (index < CAM_FD_REG_MAX)) {
+ soc_private->regbase_index[CAM_FD_REG_CORE] = index;
+ } else {
+ CAM_ERR(CAM_FD, "regbase not found for FD_CORE, rc=%d, %d %d",
+ rc, index, CAM_FD_REG_MAX);
+ return -EINVAL;
+ }
+
+ rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+ soc_info->num_mem_block, "fd_wrapper", &index);
+ if ((rc == 0) && (index < CAM_FD_REG_MAX)) {
+ soc_private->regbase_index[CAM_FD_REG_WRAPPER] = index;
+ } else {
+ CAM_ERR(CAM_FD, "regbase not found FD_WRAPPER, rc=%d, %d %d",
+ rc, index, CAM_FD_REG_MAX);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_FD, "Reg indices : CORE=%d, WRAPPER=%d",
+ soc_private->regbase_index[CAM_FD_REG_CORE],
+ soc_private->regbase_index[CAM_FD_REG_WRAPPER]);
+
+ return 0;
+}
+
+/*
+ * Clear the memory-retention flags (NORETAIN_MEM, NORETAIN_PERIPH) on the
+ * FD core clocks ("fd_core_clk", "fd_core_uar_clk"). Failures are logged
+ * per clock; the last failing rc is returned.
+ */
+static int cam_fd_soc_set_clk_flags(struct cam_hw_soc_info *soc_info)
+{
+ int i, rc = 0;
+
+ if (soc_info->num_clk > CAM_SOC_MAX_CLK) {
+ CAM_ERR(CAM_FD, "Invalid num clk %d", soc_info->num_clk);
+ return -EINVAL;
+ }
+
+ /* set memcore and mem periphery logic flags to 0 */
+ for (i = 0; i < soc_info->num_clk; i++) {
+ if ((strcmp(soc_info->clk_name[i], "fd_core_clk") == 0) ||
+ (strcmp(soc_info->clk_name[i], "fd_core_uar_clk") ==
+ 0)) {
+ rc = cam_soc_util_set_clk_flags(soc_info, i,
+ CLKFLAG_NORETAIN_MEM);
+ if (rc)
+ CAM_ERR(CAM_FD,
+ "Failed in NORETAIN_MEM i=%d, rc=%d",
+ i, rc);
+
+ /*
+ * Bug fix: the return value was previously discarded,
+ * so the following check tested the stale rc from the
+ * NORETAIN_MEM call and PERIPH failures went unnoticed.
+ */
+ rc = cam_soc_util_set_clk_flags(soc_info, i,
+ CLKFLAG_NORETAIN_PERIPH);
+ if (rc)
+ CAM_ERR(CAM_FD,
+ "Failed in NORETAIN_PERIPH i=%d, rc=%d",
+ i, rc);
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Write @reg_value at @reg_offset inside the register block selected by
+ * @reg_base (CORE or WRAPPER), via the regbase_index mapping built by
+ * cam_fd_hw_soc_util_setup_regbase_indices(). Uses cam_io_w_mb (write
+ * with barrier). @reg_base is not range-checked here — callers must pass
+ * a valid enum value.
+ */
+void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info,
+ enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value)
+{
+ struct cam_fd_soc_private *soc_private =
+ (struct cam_fd_soc_private *)soc_info->soc_private;
+ int32_t reg_index = soc_private->regbase_index[reg_base];
+
+ CAM_DBG(CAM_FD, "FD_REG_WRITE: Base[%d] Offset[0x%8x] Value[0x%8x]",
+ reg_base, reg_offset, reg_value);
+
+ cam_io_w_mb(reg_value,
+ soc_info->reg_map[reg_index].mem_base + reg_offset);
+}
+
+/*
+ * Read and return the 32-bit value at @reg_offset inside the register
+ * block selected by @reg_base, using cam_io_r_mb (read with barrier).
+ * Counterpart of cam_fd_soc_register_write(); same caveat — @reg_base is
+ * assumed valid.
+ */
+uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info,
+ enum cam_fd_reg_base reg_base, uint32_t reg_offset)
+{
+ struct cam_fd_soc_private *soc_private =
+ (struct cam_fd_soc_private *)soc_info->soc_private;
+ int32_t reg_index = soc_private->regbase_index[reg_base];
+ uint32_t reg_value;
+
+ reg_value = cam_io_r_mb(
+ soc_info->reg_map[reg_index].mem_base + reg_offset);
+
+ CAM_DBG(CAM_FD, "FD_REG_READ: Base[%d] Offset[0x%8x] Value[0x%8x]",
+ reg_base, reg_offset, reg_value);
+
+ return reg_value;
+}
+
+/*
+ * Power up the FD block: vote CPAS (AHB at SVS, fixed AXI bandwidth of
+ * 7200000 — units not visible here, presumably bytes/sec; TODO confirm),
+ * then enable the platform resources (clocks/regulators/irq). On
+ * platform-enable failure the CPAS vote is rolled back.
+ *
+ * Note: a CPAS start failure is remapped to -EFAULT instead of
+ * propagating the underlying rc.
+ */
+int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info)
+{
+ struct cam_fd_soc_private *soc_private = soc_info->soc_private;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+ int rc;
+
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = 7200000;
+ axi_vote.uncompressed_bw = 7200000;
+ rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Error in CPAS START, rc=%d", rc);
+ return -EFAULT;
+ }
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE,
+ true);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Error enable platform failed, rc=%d", rc);
+ goto stop_cpas;
+ }
+
+ return rc;
+
+stop_cpas:
+ if (cam_cpas_stop(soc_private->cpas_handle))
+ CAM_ERR(CAM_FD, "Error in CPAS STOP");
+
+ return rc;
+}
+
+
+/*
+ * Power down the FD block in the reverse order of
+ * cam_fd_soc_enable_resources(): disable platform resources first, then
+ * remove the CPAS vote. Returns the first failing rc; on early failure
+ * the CPAS vote is intentionally left in place.
+ */
+int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info)
+{
+ struct cam_fd_soc_private *soc_private;
+ int rc = 0;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_FD, "Invalid soc_info param");
+ return -EINVAL;
+ }
+ soc_private = soc_info->soc_private;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc) {
+ CAM_ERR(CAM_FD, "disable platform resources failed, rc=%d", rc);
+ return rc;
+ }
+
+ rc = cam_cpas_stop(soc_private->cpas_handle);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Error in CPAS STOP, handle=0x%x, rc=%d",
+ soc_private->cpas_handle, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+/*
+ * One-time SOC setup for the FD device:
+ *   1. Parse DT properties into @soc_info.
+ *   2. Request platform resources (register maps, @irq_handler wired to
+ *      @private_data).
+ *   3. Apply clock memory-retention flags.
+ *   4. Allocate the FD private SOC struct and build the register-base
+ *      index mapping.
+ *   5. Register with CPAS (callback: cam_fd_hw_util_cpas_callback) and
+ *      store the client handle.
+ * On failure each step unwinds the previous ones via the labels below.
+ * Ownership: soc_info->soc_private is allocated here and freed by
+ * cam_fd_soc_deinit_resources().
+ */
+int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t irq_handler, void *private_data)
+{
+ struct cam_fd_soc_private *soc_private;
+ struct cam_cpas_register_params cpas_register_param;
+ int rc;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in get_dt_properties, rc=%d", rc);
+ return rc;
+ }
+
+ rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+ private_data);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in request_platform_resource rc=%d",
+ rc);
+ return rc;
+ }
+
+ rc = cam_fd_soc_set_clk_flags(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FD, "failed in set_clk_flags rc=%d", rc);
+ goto release_res;
+ }
+
+ soc_private = kzalloc(sizeof(struct cam_fd_soc_private), GFP_KERNEL);
+ if (!soc_private) {
+ rc = -ENOMEM;
+ goto release_res;
+ }
+ soc_info->soc_private = soc_private;
+
+ rc = cam_fd_hw_soc_util_setup_regbase_indices(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in setup regbase, rc=%d", rc);
+ goto free_soc_private;
+ }
+
+ memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+ strlcpy(cpas_register_param.identifier, "fd", CAM_HW_IDENTIFIER_LENGTH);
+ cpas_register_param.cell_index = soc_info->index;
+ cpas_register_param.dev = &soc_info->pdev->dev;
+ cpas_register_param.userdata = private_data;
+ cpas_register_param.cam_cpas_client_cb = cam_fd_hw_util_cpas_callback;
+
+ rc = cam_cpas_register_client(&cpas_register_param);
+ if (rc) {
+ CAM_ERR(CAM_FD, "CPAS registration failed");
+ goto free_soc_private;
+ }
+ soc_private->cpas_handle = cpas_register_param.client_handle;
+ CAM_DBG(CAM_FD, "CPAS handle=%d", soc_private->cpas_handle);
+
+ return rc;
+
+free_soc_private:
+ kfree(soc_info->soc_private);
+ soc_info->soc_private = NULL;
+release_res:
+ cam_soc_util_release_platform_resource(soc_info);
+
+ return rc;
+}
+
+/*
+ * Tear down what cam_fd_soc_init_resources() set up: unregister the CPAS
+ * client, release platform resources, and free the private SOC struct.
+ * Failures are logged but do not abort the teardown; the last rc is
+ * returned.
+ *
+ * NOTE(review): soc_info->soc_private is dereferenced without a NULL
+ * check — assumes this is only called after a successful init; confirm
+ * against the callers (e.g. cam_fd_hw_dev_remove).
+ */
+int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+ struct cam_fd_soc_private *soc_private =
+ (struct cam_fd_soc_private *)soc_info->soc_private;
+ int rc;
+
+ rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_FD, "Unregister cpas failed, handle=%d, rc=%d",
+ soc_private->cpas_handle, rc);
+
+ rc = cam_soc_util_release_platform_resource(soc_info);
+ if (rc)
+ CAM_ERR(CAM_FD, "release platform failed, rc=%d", rc);
+
+ kfree(soc_info->soc_private);
+ soc_info->soc_private = NULL;
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h
new file mode 100644
index 000000000000..347c97679016
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_SOC_H_
+#define _CAM_FD_HW_SOC_H_
+
+#include "cam_soc_util.h"
+
+/**
+ * enum cam_fd_reg_base - Enum for FD register sets
+ *
+ * @CAM_FD_REG_CORE : Indicates FD Core register space
+ * @CAM_FD_REG_WRAPPER : Indicates FD Wrapper register space
+ * @CAM_FD_REG_MAX : Max number of register sets supported
+ */
+enum cam_fd_reg_base {
+ CAM_FD_REG_CORE,
+ CAM_FD_REG_WRAPPER,
+ CAM_FD_REG_MAX
+};
+
+/**
+ * struct cam_fd_soc_private : FD private SOC information
+ *
+ * @regbase_index : Mapping between Register base enum to register index in SOC
+ * @cpas_handle : CPAS handle
+ *
+ */
+struct cam_fd_soc_private {
+ int32_t regbase_index[CAM_FD_REG_MAX];
+ uint32_t cpas_handle;
+};
+
+/* One-time SOC setup: DT parse, platform resources, CPAS registration. */
+int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t irq_handler, void *private_data);
+/* Teardown counterpart of cam_fd_soc_init_resources(). */
+int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+/* Runtime power-up: CPAS vote + clocks/regulators/irq enable. */
+int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info);
+/* Runtime power-down, reverse order of enable. */
+int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info);
+/* Barrier read/write of a register within the CORE or WRAPPER block. */
+uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info,
+ enum cam_fd_reg_base reg_base, uint32_t reg_offset);
+void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info,
+ enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value);
+
+#endif /* _CAM_FD_HW_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h
new file mode 100644
index 000000000000..b9beebc41c26
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h
@@ -0,0 +1,70 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_V41_H_
+#define _CAM_FD_HW_V41_H_
+
+static struct cam_fd_hw_static_info cam_fd_wrapper120_core410_info = {
+ .core_version = {
+ .major = 4,
+ .minor = 1,
+ .incr = 0,
+ },
+ .wrapper_version = {
+ .major = 1,
+ .minor = 2,
+ .incr = 0,
+ },
+ .core_regs = {
+ .version = 0x38,
+ .control = 0x0,
+ .result_cnt = 0x4,
+ .result_addr = 0x20,
+ .image_addr = 0x24,
+ .work_addr = 0x28,
+ .ro_mode = 0x34,
+ .results_reg_base = 0x400,
+ .raw_results_reg_base = 0x800,
+ },
+ .wrapper_regs = {
+ .wrapper_version = 0x0,
+ .cgc_disable = 0x4,
+ .hw_stop = 0x8,
+ .sw_reset = 0x10,
+ .vbif_req_priority = 0x20,
+ .vbif_priority_level = 0x24,
+ .vbif_done_status = 0x34,
+ .irq_mask = 0x50,
+ .irq_status = 0x54,
+ .irq_clear = 0x58,
+ },
+ .results = {
+ .max_faces = 35,
+ .per_face_entries = 4,
+ .raw_results_available = true,
+ .raw_results_entries = 512,
+ },
+ .enable_errata_wa = {
+ .single_irq_only = true,
+ .ro_mode_enable_always = true,
+ .ro_mode_results_invalid = true,
+ },
+ .irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
+ CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) |
+ CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE),
+ .qos_priority = 4,
+ .qos_priority_level = 4,
+ .supported_modes = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID,
+ .ro_mode_supported = true,
+};
+
+#endif /* _CAM_FD_HW_V41_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h
new file mode 100644
index 000000000000..44b9ab58e566
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h
@@ -0,0 +1,70 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FD_HW_V501_H_
+#define _CAM_FD_HW_V501_H_
+
+static struct cam_fd_hw_static_info cam_fd_wrapper200_core501_info = {
+ .core_version = {
+ .major = 5,
+ .minor = 0,
+ .incr = 1,
+ },
+ .wrapper_version = {
+ .major = 2,
+ .minor = 0,
+ .incr = 0,
+ },
+ .core_regs = {
+ .version = 0x38,
+ .control = 0x0,
+ .result_cnt = 0x4,
+ .result_addr = 0x20,
+ .image_addr = 0x24,
+ .work_addr = 0x28,
+ .ro_mode = 0x34,
+ .results_reg_base = 0x400,
+ .raw_results_reg_base = 0x800,
+ },
+ .wrapper_regs = {
+ .wrapper_version = 0x0,
+ .cgc_disable = 0x4,
+ .hw_stop = 0x8,
+ .sw_reset = 0x10,
+ .vbif_req_priority = 0x20,
+ .vbif_priority_level = 0x24,
+ .vbif_done_status = 0x34,
+ .irq_mask = 0x50,
+ .irq_status = 0x54,
+ .irq_clear = 0x58,
+ },
+ .results = {
+ .max_faces = 35,
+ .per_face_entries = 4,
+ .raw_results_available = true,
+ .raw_results_entries = 512,
+ },
+ .enable_errata_wa = {
+ .single_irq_only = true,
+ .ro_mode_enable_always = true,
+ .ro_mode_results_invalid = true,
+ },
+ .irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
+ CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) |
+ CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE),
+ .qos_priority = 4,
+ .qos_priority_level = 4,
+ .supported_modes = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID,
+ .ro_mode_supported = true,
+};
+
+#endif /* _CAM_FD_HW_V501_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/Makefile b/drivers/media/platform/msm/camera_v3/cam_icp/Makefile
new file mode 100644
index 000000000000..d038946dbc86
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_subdev.o cam_icp_context.o hfi.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.c
new file mode 100644
index 000000000000..cba6f985bca4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.c
@@ -0,0 +1,279 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/cam_sync.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_sync_api.h"
+#include "cam_node.h"
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_icp_context.h"
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+#include "cam_packet_util.h"
+
+static const char icp_dev_name[] = "icp";
+
+/*
+ * Page-fault (pagefault_ops) handler for an ICP context: walk the active
+ * request list and ask the HW layer to dump packet info for each request,
+ * flagging the request whose buffers contain the faulting @iova.
+ * @data is the cam_context registered with this callback.
+ *
+ * Returns the rc of the *last* dump call; earlier per-request failures
+ * are only logged.
+ */
+static int cam_icp_context_dump_active_request(void *data, unsigned long iova,
+ uint32_t buf_info)
+{
+ struct cam_context *ctx = (struct cam_context *)data;
+ struct cam_ctx_request *req = NULL;
+ struct cam_ctx_request *req_temp = NULL;
+ struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL;
+ int rc = 0;
+ bool b_mem_found = false;
+
+ if (!ctx) {
+ CAM_ERR(CAM_ICP, "Invalid ctx");
+ return -EINVAL;
+ }
+
+ CAM_INFO(CAM_ICP, "iommu fault for icp ctx %d state %d",
+ ctx->ctx_id, ctx->state);
+
+ list_for_each_entry_safe(req, req_temp,
+ &ctx->active_req_list, list) {
+ pf_dbg_entry = &(req->pf_data);
+ CAM_INFO(CAM_ICP, "req_id : %lld", req->request_id);
+
+ rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+ iova, buf_info, &b_mem_found);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to dump pf info");
+
+ if (b_mem_found)
+ CAM_ERR(CAM_ICP, "Found page fault in req %lld %d",
+ req->request_id, rc);
+ }
+
+ return rc;
+}
+
+/*
+ * AVAILABLE-state acquire handler: delegate to the generic context-to-HW
+ * acquire; on success transition the context to ACQUIRED.
+ */
+static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
+ struct cam_acquire_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+ if (!rc) {
+ ctx->state = CAM_CTX_ACQUIRED;
+ trace_cam_context_state("ICP", ctx);
+ }
+
+ return rc;
+}
+
+/*
+ * ACQUIRED-state release handler. The context transitions to AVAILABLE
+ * even if the HW release fails (failure is only logged).
+ */
+static int __cam_icp_release_dev_in_acquired(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_release_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Unable to release device");
+
+ ctx->state = CAM_CTX_AVAILABLE;
+ trace_cam_context_state("ICP", ctx);
+ return rc;
+}
+
+/*
+ * ACQUIRED-state start handler: start the HW; on success transition the
+ * context to READY.
+ */
+static int __cam_icp_start_dev_in_acquired(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_start_dev_to_hw(ctx, cmd);
+ if (!rc) {
+ ctx->state = CAM_CTX_READY;
+ trace_cam_context_state("ICP", ctx);
+ }
+
+ return rc;
+}
+
+/*
+ * Flush handler (installed for both ACQUIRED and READY states): forward
+ * the flush to the HW layer; no context state change.
+ */
+static int __cam_icp_flush_dev_in_ready(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_flush_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to flush device");
+
+ return rc;
+}
+
+/*
+ * Config handler (ACQUIRED and READY states): map the UMD packet, then
+ * dispatch on the low byte of the packet opcode — IPE/BPS "settings"
+ * packets go straight to config_dev_to_hw, everything else through the
+ * prepare path.
+ *
+ * NOTE(review): cmd->offset is added to the mapped address without being
+ * validated against the mapped length @len — confirm upstream validation.
+ * NOTE(review): the CPU-buf reference from cam_mem_get_cpu_buf() is not
+ * explicitly released here; whether a put is required depends on the mem
+ * mgr contract — verify.
+ */
+static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
+ struct cam_config_dev_cmd *cmd)
+{
+ int rc;
+ size_t len;
+ uintptr_t packet_addr;
+ struct cam_packet *packet;
+
+ rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+ &packet_addr, &len);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "[%s][%d] Can not get packet address",
+ ctx->dev_name, ctx->ctx_id);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ packet = (struct cam_packet *) ((uint8_t *)packet_addr +
+ (uint32_t)cmd->offset);
+
+ if (((packet->header.op_code & 0xff) ==
+ CAM_ICP_OPCODE_IPE_SETTINGS) ||
+ ((packet->header.op_code & 0xff) ==
+ CAM_ICP_OPCODE_BPS_SETTINGS))
+ rc = cam_context_config_dev_to_hw(ctx, cmd);
+ else
+ rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to prepare device");
+
+ return rc;
+}
+
+/*
+ * READY-state stop handler. @cmd is unused by the HW call and may be
+ * NULL (see __cam_icp_release_dev_in_ready). The context transitions
+ * back to ACQUIRED even if the HW stop fails (failure is only logged).
+ */
+static int __cam_icp_stop_dev_in_ready(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_stop_dev_to_hw(ctx);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to stop device");
+
+ ctx->state = CAM_CTX_ACQUIRED;
+ trace_cam_context_state("ICP", ctx);
+ return rc;
+}
+
+/*
+ * READY-state release handler: stop first (NULL cmd — the stop path does
+ * not use it), then reuse the ACQUIRED-state release. Returns the rc of
+ * the release step; a stop failure is only logged.
+ */
+static int __cam_icp_release_dev_in_ready(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = __cam_icp_stop_dev_in_ready(ctx, NULL);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to stop device");
+
+ rc = __cam_icp_release_dev_in_acquired(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to release device");
+
+ return rc;
+}
+
+/* irq_ops trampoline: forward a buf-done event to the generic handler. */
+static int __cam_icp_handle_buf_done_in_ready(void *ctx,
+ uint32_t evt_id, void *done)
+{
+ return cam_context_buf_done_from_hw(ctx, done, evt_id);
+}
+
+/*
+ * Per-state ioctl/irq/pagefault dispatch table for ICP contexts, indexed
+ * by cam_context state (Uninit, Available, Acquired, Ready, Activated).
+ * Installed on ctx->base->state_machine in cam_icp_context_init().
+ */
+static struct cam_ctx_ops
+ cam_icp_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+ /* Uninit */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Available */
+ {
+ .ioctl_ops = {
+ .acquire_dev = __cam_icp_acquire_dev_in_available,
+ },
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Acquired */
+ {
+ .ioctl_ops = {
+ .release_dev = __cam_icp_release_dev_in_acquired,
+ .start_dev = __cam_icp_start_dev_in_acquired,
+ .config_dev = __cam_icp_config_dev_in_ready,
+ .flush_dev = __cam_icp_flush_dev_in_ready,
+ },
+ .crm_ops = {},
+ .irq_ops = __cam_icp_handle_buf_done_in_ready,
+ .pagefault_ops = cam_icp_context_dump_active_request,
+ },
+ /* Ready */
+ {
+ .ioctl_ops = {
+ .stop_dev = __cam_icp_stop_dev_in_ready,
+ .release_dev = __cam_icp_release_dev_in_ready,
+ .config_dev = __cam_icp_config_dev_in_ready,
+ .flush_dev = __cam_icp_flush_dev_in_ready,
+ },
+ .crm_ops = {},
+ .irq_ops = __cam_icp_handle_buf_done_in_ready,
+ .pagefault_ops = cam_icp_context_dump_active_request,
+ },
+ /* Activated */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ .pagefault_ops = cam_icp_context_dump_active_request,
+ },
+};
+
+/*
+ * Initialize an ICP context: validate inputs, init the embedded base
+ * context with the ICP request pool, then install the ICP state machine
+ * and back-pointer. @ctx->base must already point at a cam_context.
+ * Returns 0 on success, -EINVAL on bad params, or the base-init rc.
+ */
+int cam_icp_context_init(struct cam_icp_context *ctx,
+ struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id)
+{
+ int rc;
+
+ if ((!ctx) || (!ctx->base) || (!hw_intf)) {
+ CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", ctx, hw_intf);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = cam_context_init(ctx->base, icp_dev_name, CAM_ICP, ctx_id,
+ NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Camera Context Base init failed");
+ goto err;
+ }
+
+ ctx->base->state_machine = cam_icp_ctx_state_machine;
+ ctx->base->ctx_priv = ctx;
+ ctx->ctxt_to_hw_map = NULL;
+
+err:
+ return rc;
+}
+
+/*
+ * Deinitialize an ICP context: tear down the base context and zero the
+ * whole struct (note this also clears ctx->base). Returns -EINVAL on bad
+ * params, 0 otherwise.
+ */
+int cam_icp_context_deinit(struct cam_icp_context *ctx)
+{
+ if ((!ctx) || (!ctx->base)) {
+ CAM_ERR(CAM_ICP, "Invalid params: %pK", ctx);
+ return -EINVAL;
+ }
+
+ cam_context_deinit(ctx->base);
+ memset(ctx, 0, sizeof(*ctx));
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.h b/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.h
new file mode 100644
index 000000000000..0c3a360c7de5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ICP_CONTEXT_H_
+#define _CAM_ICP_CONTEXT_H_
+
+#include "cam_context.h"
+
+/**
+ * struct cam_icp_context - icp context
+ * @base: icp context object
+ * @state_machine: state machine for ICP context
+ * @req_base: common request structure
+ * @state: icp context state
+ * @ctxt_to_hw_map: context to FW handle mapping
+ */
+struct cam_icp_context {
+ struct cam_context *base;
+ struct cam_ctx_ops *state_machine;
+ struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+ uint32_t state;
+ void *ctxt_to_hw_map;
+};
+
+/**
+ * cam_icp_context_init() - ICP context init
+ * @ctx: Pointer to context
+ * @hw_intf: Pointer to ICP hardware interface
+ * @ctx_id: ID for this context
+ */
+int cam_icp_context_init(struct cam_icp_context *ctx,
+ struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id);
+
+/**
+ * cam_icp_context_deinit() - ICP context deinit
+ * @ctx: Pointer to context
+ */
+int cam_icp_context_deinit(struct cam_icp_context *ctx);
+
+#endif /* _CAM_ICP_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_subdev.c
new file mode 100644
index 000000000000..699ad5f2a0b4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_subdev.c
@@ -0,0 +1,282 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_req_mgr_dev.h"
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_context.h"
+#include "cam_icp_context.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
+
+#define CAM_ICP_DEV_NAME "cam-icp"
+
+/**
+ * struct cam_icp_subdev - per-driver state for the ICP v4l2 subdevice
+ * @sd:       cam_subdev wrapper registered with the v4l2 framework
+ * @node:     cam_node backing this subdev
+ * @ctx:      base context storage, one per ICP context
+ * @ctx_icp:  ICP context wrappers; each wraps the matching @ctx entry
+ * @icp_lock: serializes open/close and protects @open_cnt
+ * @open_cnt: number of current opens (driver allows at most one)
+ * @reserved: reserved field
+ */
+struct cam_icp_subdev {
+	struct cam_subdev sd;
+	struct cam_node *node;
+	struct cam_context ctx[CAM_ICP_CTX_MAX];
+	struct cam_icp_context ctx_icp[CAM_ICP_CTX_MAX];
+	struct mutex icp_lock;
+	int32_t open_cnt;
+	int32_t reserved;
+};
+
+static struct cam_icp_subdev g_icp_dev;
+
+static const struct of_device_id cam_icp_dt_match[] = {
+ {.compatible = "qcom,cam-icp"},
+ {}
+};
+
+/*
+ * cam_icp_dev_iommu_fault_handler() - SMMU page-fault callback.
+ *
+ * @token carries the cam_node registered at probe time; the faulting
+ * IOVA and buffer info are forwarded to every context on the node so
+ * each can dump its active requests.
+ */
+static void cam_icp_dev_iommu_fault_handler(
+	struct iommu_domain *domain, struct device *dev, unsigned long iova,
+	int flags, void *token, uint32_t buf_info)
+{
+	struct cam_node *node = (struct cam_node *)token;
+	int idx;
+
+	if (!node) {
+		CAM_ERR(CAM_ICP, "invalid token in page handler cb");
+		return;
+	}
+
+	for (idx = 0; idx < node->ctx_size; idx++)
+		cam_context_dump_pf_info(&node->ctx_list[idx], iova, buf_info);
+}
+
+/*
+ * cam_icp_subdev_open() - v4l2 open hook for the ICP subdev.
+ *
+ * Single-client device: a second open fails with -EALREADY.  The first
+ * successful open triggers firmware download through the hw manager's
+ * hw_open().  Serialized against close via g_icp_dev.icp_lock.
+ */
+static int cam_icp_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	mutex_lock(&g_icp_dev.icp_lock);
+	/* reject a second concurrent open */
+	if (g_icp_dev.open_cnt >= 1) {
+		CAM_ERR(CAM_ICP, "ICP subdev is already opened");
+		rc = -EALREADY;
+		goto end;
+	}
+
+	if (!node) {
+		CAM_ERR(CAM_ICP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* hw_open() performs the ICP firmware download */
+	hw_mgr_intf = &node->hw_mgr_intf;
+	rc = hw_mgr_intf->hw_open(hw_mgr_intf->hw_mgr_priv, NULL);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "FW download failed");
+		goto end;
+	}
+	g_icp_dev.open_cnt++;
+end:
+	mutex_unlock(&g_icp_dev.icp_lock);
+	return rc;
+}
+
+/*
+ * cam_icp_subdev_close() - v4l2 close hook for the ICP subdev.
+ *
+ * Drops the open count and shuts the node down.  Serialized against
+ * open via g_icp_dev.icp_lock.
+ *
+ * Returns 0 on success, -EINVAL if not open or node is missing, or the
+ * cam_node_shutdown() error.
+ */
+static int cam_icp_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&g_icp_dev.icp_lock);
+	if (g_icp_dev.open_cnt <= 0) {
+		CAM_DBG(CAM_ICP, "ICP subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+	g_icp_dev.open_cnt--;
+	if (!node) {
+		CAM_ERR(CAM_ICP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = cam_node_shutdown(node);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "HW close failed");
+		goto end;
+	}
+
+end:
+	mutex_unlock(&g_icp_dev.icp_lock);
+	/* propagate the failure instead of unconditionally returning 0 */
+	return rc;
+}
+
+const struct v4l2_subdev_internal_ops cam_icp_subdev_internal_ops = {
+ .open = cam_icp_subdev_open,
+ .close = cam_icp_subdev_close,
+};
+
+/*
+ * cam_icp_probe() - platform probe for the ICP device.
+ *
+ * Registers the subdev, allocates and initializes the hw manager
+ * interface, initializes all ICP contexts, initializes the cam_node,
+ * and registers the SMMU page-fault handler.  All steps are unwound in
+ * reverse order on failure.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int cam_icp_probe(struct platform_device *pdev)
+{
+	int rc = 0, i = 0;
+	struct cam_node *node;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	int iommu_hdl = -1;
+
+	if (!pdev) {
+		CAM_ERR(CAM_ICP, "pdev is NULL");
+		return -EINVAL;
+	}
+
+	g_icp_dev.sd.pdev = pdev;
+	g_icp_dev.sd.internal_ops = &cam_icp_subdev_internal_ops;
+	rc = cam_subdev_probe(&g_icp_dev.sd, pdev, CAM_ICP_DEV_NAME,
+		CAM_ICP_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ICP cam_subdev_probe failed");
+		goto probe_fail;
+	}
+
+	node = (struct cam_node *) g_icp_dev.sd.token;
+
+	hw_mgr_intf = kzalloc(sizeof(*hw_mgr_intf), GFP_KERNEL);
+	if (!hw_mgr_intf) {
+		/* allocation failure is -ENOMEM, not -EINVAL */
+		rc = -ENOMEM;
+		goto hw_alloc_fail;
+	}
+
+	rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf,
+		&iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ICP HW manager init failed: %d", rc);
+		goto hw_init_fail;
+	}
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		g_icp_dev.ctx_icp[i].base = &g_icp_dev.ctx[i];
+		rc = cam_icp_context_init(&g_icp_dev.ctx_icp[i],
+			hw_mgr_intf, i);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "ICP context init failed");
+			goto ctx_fail;
+		}
+	}
+
+	rc = cam_node_init(node, hw_mgr_intf, g_icp_dev.ctx,
+		CAM_ICP_CTX_MAX, CAM_ICP_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ICP node init failed");
+		goto ctx_fail;
+	}
+
+	/* cache the node so cam_icp_remove() can deinit it */
+	g_icp_dev.node = node;
+
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_icp_dev_iommu_fault_handler, node);
+
+	g_icp_dev.open_cnt = 0;
+	mutex_init(&g_icp_dev.icp_lock);
+
+	CAM_DBG(CAM_ICP, "ICP probe complete");
+
+	return rc;
+
+ctx_fail:
+	/* i == CAM_ICP_CTX_MAX when node init fails, so all ctxs unwind */
+	for (--i; i >= 0; i--)
+		cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
+hw_init_fail:
+	kfree(hw_mgr_intf);
+hw_alloc_fail:
+	cam_subdev_remove(&g_icp_dev.sd);
+probe_fail:
+	return rc;
+}
+
+/*
+ * cam_icp_remove() - platform remove for the ICP device.
+ *
+ * Deinitializes all ICP contexts, the node, and the subdev, then
+ * destroys the open/close lock.
+ *
+ * NOTE(review): this passes g_icp_dev.node to cam_node_deinit(), but
+ * probe as written never assigns g_icp_dev.node - confirm against
+ * cam_node_deinit()'s NULL handling.
+ */
+static int cam_icp_remove(struct platform_device *pdev)
+{
+	int i;
+	struct v4l2_subdev *sd;
+	struct cam_subdev *subdev;
+
+	if (!pdev) {
+		CAM_ERR(CAM_ICP, "pdev is NULL");
+		return -ENODEV;
+	}
+
+	sd = platform_get_drvdata(pdev);
+	if (!sd) {
+		CAM_ERR(CAM_ICP, "V4l2 subdev is NULL");
+		return -ENODEV;
+	}
+
+	subdev = v4l2_get_subdevdata(sd);
+	if (!subdev) {
+		CAM_ERR(CAM_ICP, "cam subdev is NULL");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
+	cam_node_deinit(g_icp_dev.node);
+	cam_subdev_remove(&g_icp_dev.sd);
+	mutex_destroy(&g_icp_dev.icp_lock);
+
+	return 0;
+}
+
+static struct platform_driver cam_icp_driver = {
+ .probe = cam_icp_probe,
+ .remove = cam_icp_remove,
+ .driver = {
+ .name = "cam_icp",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_icp_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_icp_init_module(void)
+{
+ return platform_driver_register(&cam_icp_driver);
+}
+
+static void __exit cam_icp_exit_module(void)
+{
+ platform_driver_unregister(&cam_icp_driver);
+}
+module_init(cam_icp_init_module);
+module_exit(cam_icp_exit_module);
+MODULE_DESCRIPTION("MSM ICP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_intf.h
new file mode 100644
index 000000000000..3d0ee725dcad
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_intf.h
@@ -0,0 +1,164 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _HFI_INTF_H_
+#define _HFI_INTF_H_
+
+#include <linux/types.h>
+
+/**
+ * struct hfi_mem
+ * @len: length of memory
+ * @kva: kernel virtual address
+ * @iova: IO virtual address
+ * @reserved: reserved field
+ */
+struct hfi_mem {
+ uint64_t len;
+ uintptr_t kva;
+ uint32_t iova;
+ uint32_t reserved;
+};
+
+/**
+ * struct hfi_mem_info
+ * @qtbl: qtable hfi memory
+ * @cmd_q: command queue hfi memory for host to firmware communication
+ * @msg_q: message queue hfi memory for firmware to host communication
+ * @dbg_q: debug queue hfi memory for firmware debug information
+ * @sfr_buf: buffer for subsystem failure reason[SFR]
+ * @sec_heap: secondary heap hfi memory for firmware
+ * @shmem: shared memory region for firmware
+ * @qdss: qdss mapped memory for fw
+ * @icp_base: icp base address
+ */
+struct hfi_mem_info {
+	struct hfi_mem qtbl;
+	struct hfi_mem cmd_q;
+	struct hfi_mem msg_q;
+	struct hfi_mem dbg_q;
+	struct hfi_mem sfr_buf;
+	struct hfi_mem sec_heap;
+	struct hfi_mem shmem;
+	struct hfi_mem qdss;
+	void __iomem *icp_base;
+};
+
+/**
+ * hfi_write_cmd() - function for hfi write
+ * @cmd_ptr: pointer to command data for hfi write
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_write_cmd(void *cmd_ptr);
+
+/**
+ * hfi_read_message() - function for hfi read
+ * @pmsg: buffer to place read message for hfi queue
+ * @q_id: queue id
+ * @words_read: total number of words read from the queue
+ * returned as output to the caller
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id, uint32_t *words_read);
+
+/**
+ * cam_hfi_init() - function initialize hfi after firmware download
+ * @event_driven_mode: event mode
+ * @hfi_mem: hfi memory info
+ * @icp_base: icp base address
+ * @debug: debug flag
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug);
+
+/**
+ * hfi_get_hw_caps() - hardware capabilities from firmware
+ * @query_caps: holds query information from hfi
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_get_hw_caps(void *query_caps);
+
+/**
+ * hfi_send_system_cmd() - send hfi system command to firmware
+ * @type: type of system command
+ * @data: command data
+ * @size: size of command data
+ */
+void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size);
+
+/**
+ * cam_hfi_enable_cpu() - enable A5 CPU
+ * @icp_base: icp base address
+ */
+void cam_hfi_enable_cpu(void __iomem *icp_base);
+
+/**
+ * cam_hfi_disable_cpu() - disable A5 CPU
+ * @icp_base: icp base address
+ */
+void cam_hfi_disable_cpu(void __iomem *icp_base);
+
+/**
+ * cam_hfi_deinit() - cleanup HFI
+ */
+void cam_hfi_deinit(void __iomem *icp_base);
+/**
+ * hfi_set_debug_level() - set debug level
+ * @a5_dbg_type: 1 for debug_q & 2 for qdss
+ * @lvl: FW debug message level
+ */
+int hfi_set_debug_level(u64 a5_dbg_type, uint32_t lvl);
+
+/**
+ * hfi_set_fw_dump_level() - set firmware dump level
+ * @lvl: level of firmware dump level
+ */
+int hfi_set_fw_dump_level(uint32_t lvl);
+
+/**
+ * hfi_enable_ipe_bps_pc() - Enable interframe pc
+ * Host sends a command to firmware to enable interframe
+ * power collapse for IPE and BPS hardware.
+ *
+ * @enable: flag to enable/disable
+ * @core_info: Core information to firmware
+ */
+int hfi_enable_ipe_bps_pc(bool enable, uint32_t core_info);
+
+/**
+ * hfi_cmd_ubwc_config() - UBWC configuration to firmware
+ * @ubwc_cfg: UBWC configuration parameters
+ */
+int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg);
+
+/**
+ * cam_hfi_resume() - function to resume
+ * @hfi_mem: hfi memory info
+ * @icp_base: icp base address
+ * @debug: debug flag
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
+ void __iomem *icp_base, bool debug);
+
+/**
+ * cam_hfi_queue_dump() - utility function to dump hfi queues
+ */
+void cam_hfi_queue_dump(void);
+
+
+#endif /* _HFI_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_reg.h
new file mode 100644
index 000000000000..f652cfa3d2a3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_reg.h
@@ -0,0 +1,340 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HFI_REG_H_
+#define _CAM_HFI_REG_H_
+
+#include <linux/types.h>
+#include "hfi_intf.h"
+
+
+/* start of ICP CSR registers */
+#define HFI_REG_A5_HW_VERSION 0x0
+#define HFI_REG_A5_CSR_NSEC_RESET 0x4
+#define HFI_REG_A5_CSR_A5_CONTROL 0x8
+#define HFI_REG_A5_CSR_ETM 0xC
+#define HFI_REG_A5_CSR_A2HOSTINTEN 0x10
+#define HFI_REG_A5_CSR_A2HOSTINT 0x14
+#define HFI_REG_A5_CSR_A2HOSTINTCLR 0x18
+#define HFI_REG_A5_CSR_A2HOSTINTSTATUS 0x1C
+#define HFI_REG_A5_CSR_A2HOSTINTSET 0x20
+#define HFI_REG_A5_CSR_HOST2ICPINT 0x30
+#define HFI_REG_A5_CSR_A5_STATUS 0x200
+#define HFI_REG_A5_QGIC2_LM_ID 0x204
+#define HFI_REG_A5_SPARE 0x400
+
+/* general purpose registers from */
+#define HFI_REG_FW_VERSION 0x44
+#define HFI_REG_HOST_ICP_INIT_REQUEST 0x48
+#define HFI_REG_ICP_HOST_INIT_RESPONSE 0x4C
+#define HFI_REG_SHARED_MEM_PTR 0x50
+#define HFI_REG_SHARED_MEM_SIZE 0x54
+#define HFI_REG_QTBL_PTR 0x58
+#define HFI_REG_UNCACHED_HEAP_PTR 0x5C
+#define HFI_REG_UNCACHED_HEAP_SIZE 0x60
+#define HFI_REG_QDSS_IOVA 0x6C
+#define HFI_REG_QDSS_IOVA_SIZE 0x70
+#define HFI_REG_SFR_PTR 0x68
+/* end of ICP CSR registers */
+
+/* flags for ICP CSR registers */
+#define ICP_FLAG_CSR_WAKE_UP_EN (1 << 4)
+#define ICP_FLAG_CSR_A5_EN (1 << 9)
+#define ICP_CSR_EN_CLKGATE_WFI (1 << 12)
+#define ICP_CSR_EDBGRQ (1 << 14)
+#define ICP_CSR_DBGSWENABLE (1 << 22)
+#define ICP_CSR_A5_STATUS_WFI (1 << 7)
+
+#define ICP_FLAG_A5_CTRL_DBG_EN (ICP_FLAG_CSR_WAKE_UP_EN|\
+ ICP_FLAG_CSR_A5_EN|\
+ ICP_CSR_EDBGRQ|\
+ ICP_CSR_DBGSWENABLE)
+
+#define ICP_FLAG_A5_CTRL_EN (ICP_FLAG_CSR_WAKE_UP_EN|\
+ ICP_FLAG_CSR_A5_EN|\
+ ICP_CSR_EN_CLKGATE_WFI)
+
+/* start of Queue table and queues */
+#define MAX_ICP_HFI_QUEUES 4
+#define ICP_QHDR_TX_TYPE_MASK 0xFF000000
+#define ICP_QHDR_RX_TYPE_MASK 0x00FF0000
+#define ICP_QHDR_PRI_TYPE_MASK 0x0000FF00
+#define ICP_QHDR_Q_ID_MASK 0x000000FF
+
+#define ICP_CMD_Q_SIZE_IN_BYTES 4096
+#define ICP_MSG_Q_SIZE_IN_BYTES 4096
+#define ICP_DBG_Q_SIZE_IN_BYTES 102400
+#define ICP_MSG_SFR_SIZE_IN_BYTES 4096
+
+#define ICP_SHARED_MEM_IN_BYTES (1024 * 1024)
+#define ICP_UNCACHED_HEAP_SIZE_IN_BYTES (2 * 1024 * 1024)
+#define ICP_HFI_MAX_PKT_SIZE_IN_WORDS 25600
+#define ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS 256
+
+#define ICP_HFI_QTBL_HOSTID1 0x01000000
+#define ICP_HFI_QTBL_STATUS_ENABLED 0x00000001
+#define ICP_HFI_NUMBER_OF_QS 3
+#define ICP_HFI_NUMBER_OF_ACTIVE_QS 3
+#define ICP_HFI_QTBL_OFFSET 0
+#define ICP_HFI_VAR_SIZE_PKT 0
+#define ICP_HFI_MAX_MSG_SIZE_IN_WORDS 128
+
+
+/* Queue Header type masks. Use these to access bitfields in qhdr_type */
+#define HFI_MASK_QHDR_TX_TYPE 0xFF000000
+#define HFI_MASK_QHDR_RX_TYPE 0x00FF0000
+#define HFI_MASK_QHDR_PRI_TYPE 0x0000FF00
+#define HFI_MASK_QHDR_Q_ID_TYPE 0x000000FF
+
+
+#define TX_EVENT_DRIVEN_MODE_1 0
+#define RX_EVENT_DRIVEN_MODE_1 0
+#define TX_EVENT_DRIVEN_MODE_2 0x01000000
+#define RX_EVENT_DRIVEN_MODE_2 0x00010000
+#define TX_EVENT_POLL_MODE_2 0x02000000
+#define RX_EVENT_POLL_MODE_2 0x00020000
+#define U32_OFFSET 0x1
+#define BYTE_WORD_SHIFT 2
+
+/**
+ * @HFI_DEINIT: HFI is not initialized yet
+ * @HFI_INIT: HFI is initialized
+ * @HFI_READY: HFI is ready to send/receive commands/messages
+ */
+enum hfi_state {
+	HFI_DEINIT,
+	HFI_INIT,
+	HFI_READY
+};
+
+/**
+ * @RESET: register cleared (0)
+ * @SET: register set (1)
+ * @SET_WM: value 1024 - presumably a watermark setting for event
+ *          driven mode 2; confirm against queue header usage
+ */
+enum reg_settings {
+	RESET,
+	SET,
+	SET_WM = 1024
+};
+
+/**
+ * @INTR_DISABLE: Disable interrupt
+ * @INTR_ENABLE: Enable interrupt
+ * @INTR_ENABLE_WD0: Enable WD0
+ * @INTR_ENABLE_WD1: Enable WD1
+ */
+enum intr_status {
+ INTR_DISABLE,
+ INTR_ENABLE,
+ INTR_ENABLE_WD0,
+ INTR_ENABLE_WD1 = 0x4
+};
+
+/**
+ * @ICP_INIT_RESP_RESET: reset init state
+ * @ICP_INIT_RESP_SUCCESS: init success
+ * @ICP_INIT_RESP_FAILED: init failed
+ */
+enum host_init_resp {
+ ICP_INIT_RESP_RESET,
+ ICP_INIT_RESP_SUCCESS,
+ ICP_INIT_RESP_FAILED
+};
+
+/**
+ * @ICP_INIT_REQUEST_RESET: reset init request
+ * @ICP_INIT_REQUEST_SET: set init request
+ */
+enum host_init_request {
+ ICP_INIT_REQUEST_RESET,
+ ICP_INIT_REQUEST_SET
+};
+
+/**
+ * @QHDR_INACTIVE: Queue is inactive
+ * @QHDR_ACTIVE: Queue is active
+ */
+enum qhdr_status {
+ QHDR_INACTIVE,
+ QHDR_ACTIVE
+};
+
+/**
+ * @INTR_MODE: event driven mode 1, each send and receive generates interrupt
+ * @WM_MODE: event driven mode 2, interrupts based on watermark mechanism
+ * @POLL_MODE: poll method
+ */
+enum qhdr_event_drv_type {
+ INTR_MODE,
+ WM_MODE,
+ POLL_MODE
+};
+
+/**
+ * @TX_INT: event driven mode 1, each send and receive generates interrupt
+ * @TX_INT_WM: event driven mode 2, interrupts based on watermark mechanism
+ * @TX_POLL: poll method
+ * @ICP_QHDR_TX_TYPE_MASK defines position in qhdr_type
+ */
+enum qhdr_tx_type {
+ TX_INT,
+ TX_INT_WM,
+ TX_POLL
+};
+
+/**
+ * @RX_INT: event driven mode 1, each send and receive generates interrupt
+ * @RX_INT_WM: event driven mode 2, interrupts based on watermark mechanism
+ * @RX_POLL: poll method
+ * @ICP_QHDR_RX_TYPE_MASK defines position in qhdr_type
+ */
+enum qhdr_rx_type {
+ RX_INT,
+ RX_INT_WM,
+ RX_POLL
+};
+
+/**
+ * @Q_CMD: Host to FW command queue
+ * @Q_MSG: FW to Host message queue
+ * @Q_DBG: FW to Host debug queue
+ * @ICP_QHDR_Q_ID_MASK defines position in qhdr_type
+ */
+enum qhdr_q_id {
+	Q_CMD,
+	Q_MSG,
+	Q_DBG
+};
+
+/**
+ * struct hfi_qtbl_hdr
+ * @qtbl_version: Queue table version number
+ *                Higher 16 bits: Major version
+ *                Lower 16 bits: Minor version
+ * @qtbl_size: Queue table size from version to last parameter in qhdr entry
+ * @qtbl_qhdr0_offset: Offset to the start of first qhdr
+ * @qtbl_qhdr_size: Queue header size in bytes
+ * @qtbl_num_q: Total number of queues in Queue table
+ * @qtbl_num_active_q: Total number of active queues
+ */
+struct hfi_qtbl_hdr {
+	uint32_t qtbl_version;
+	uint32_t qtbl_size;
+	uint32_t qtbl_qhdr0_offset;
+	uint32_t qtbl_qhdr_size;
+	uint32_t qtbl_num_q;
+	uint32_t qtbl_num_active_q;
+} __packed;
+
+/**
+ * struct hfi_q_hdr
+ * @qhdr_status: Queue status, qhdr_state define possible status
+ * @qhdr_start_addr: Queue start address in non cached memory
+ * @qhdr_type: qhdr_tx, qhdr_rx, qhdr_q_id and priority defines qhdr type
+ * @qhdr_q_size: Queue size
+ * Number of queue packets if qhdr_pkt_size is non-zero
+ * Queue size in bytes if qhdr_pkt_size is zero
+ * @qhdr_pkt_size: Size of queue packet entries
+ * 0x0: variable queue packet size
+ * non zero: size of queue packet entry, fixed
+ * @qhdr_pkt_drop_cnt: Number of packets dropped by sender
+ * @qhdr_rx_wm: Receiver watermark, applicable in event driven mode
+ * @qhdr_tx_wm: Sender watermark, applicable in event driven mode
+ * @qhdr_rx_req: Receiver sets this bit if queue is empty
+ * @qhdr_tx_req: Sender sets this bit if queue is full
+ * @qhdr_rx_irq_status: Receiver sets this bit and triggers an interrupt to
+ * the sender after packets are dequeued. Sender clears this bit
+ * @qhdr_tx_irq_status: Sender sets this bit and triggers an interrupt to
+ * the receiver after packets are queued. Receiver clears this bit
+ * @qhdr_read_idx: Read index
+ * @qhdr_write_idx: Write index
+ */
+struct hfi_q_hdr {
+ uint32_t dummy[15];
+ uint32_t qhdr_status;
+ uint32_t dummy1[15];
+ uint32_t qhdr_start_addr;
+ uint32_t dummy2[15];
+ uint32_t qhdr_type;
+ uint32_t dummy3[15];
+ uint32_t qhdr_q_size;
+ uint32_t dummy4[15];
+ uint32_t qhdr_pkt_size;
+ uint32_t dummy5[15];
+ uint32_t qhdr_pkt_drop_cnt;
+ uint32_t dummy6[15];
+ uint32_t qhdr_rx_wm;
+ uint32_t dummy7[15];
+ uint32_t qhdr_tx_wm;
+ uint32_t dummy8[15];
+ uint32_t qhdr_rx_req;
+ uint32_t dummy9[15];
+ uint32_t qhdr_tx_req;
+ uint32_t dummy10[15];
+ uint32_t qhdr_rx_irq_status;
+ uint32_t dummy11[15];
+ uint32_t qhdr_tx_irq_status;
+ uint32_t dummy12[15];
+ uint32_t qhdr_read_idx;
+ uint32_t dummy13[15];
+ uint32_t qhdr_write_idx;
+ uint32_t dummy14[15];
+};
+
+/**
+ * struct sfr_buf
+ * @size: Number of characters
+ * @msg : Subsystem failure reason
+ */
+struct sfr_buf {
+ uint32_t size;
+ char msg[ICP_MSG_SFR_SIZE_IN_BYTES];
+};
+
+/**
+ * struct hfi_q_tbl
+ * @q_tbl_hdr: Queue table header
+ * @q_hdr: Queue header info, it holds info of cmd, msg and debug queues
+ */
+struct hfi_qtbl {
+ struct hfi_qtbl_hdr q_tbl_hdr;
+ struct hfi_q_hdr q_hdr[MAX_ICP_HFI_QUEUES];
+};
+
+/**
+ * struct hfi_info
+ * @map: Hfi shared memory info
+ * @smem_size: Shared memory size
+ * @uncachedheap_size: uncached heap size
+ * @msgpacket_buf: message buffer
+ * @hfi_state: State machine for hfi
+ * @cmd_q_lock: Lock for command queue
+ * @cmd_q_state: State of command queue
+ * @mutex msg_q_lock: Lock for message queue
+ * @msg_q_state: State of message queue
+ * @csr_base: CSR base address
+ */
+struct hfi_info {
+ struct hfi_mem_info map;
+ uint32_t smem_size;
+ uint32_t uncachedheap_size;
+ uint32_t msgpacket_buf[ICP_HFI_MAX_MSG_SIZE_IN_WORDS];
+ uint8_t hfi_state;
+ struct mutex cmd_q_lock;
+ bool cmd_q_state;
+ struct mutex msg_q_lock;
+ bool msg_q_state;
+ void __iomem *csr_base;
+};
+
+#endif /* _CAM_HFI_REG_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_session_defs.h b/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_session_defs.h
new file mode 100644
index 000000000000..38137b89295e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_session_defs.h
@@ -0,0 +1,571 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HFI_SESSION_DEFS_H
+#define _CAM_HFI_SESSION_DEFS_H
+
+#include <linux/types.h>
+
+#define HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO 0x1
+#define HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS 0x2
+#define HFI_IPEBPS_CMD_OPCODE_BPS_ABORT 0x3
+#define HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY 0x4
+
+#define HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO 0x5
+#define HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS 0x6
+#define HFI_IPEBPS_CMD_OPCODE_IPE_ABORT 0x7
+#define HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY 0x8
+
+#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_IPE 0x9
+#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_BPS 0xa
+#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_BPS 0xb
+#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_IPE 0xc
+
+#define HFI_IPEBPS_CMD_OPCODE_MEM_MAP 0xe
+#define HFI_IPEBPS_CMD_OPCODE_MEM_UNMAP 0xf
+
+#define HFI_IPEBPS_HANDLE_TYPE_BPS 0x1
+#define HFI_IPEBPS_HANDLE_TYPE_IPE_RT 0x2
+#define HFI_IPEBPS_HANDLE_TYPE_IPE_NON_RT 0x3
+
+/**
+ * struct mem_map_region_data
+ * @start_addr: cmd buffer region start addr
+ * @len : length of the region
+ *
+ * create mem_map_region_data
+ */
+struct mem_map_region_data {
+ uint32_t start_addr;
+ uint32_t len;
+};
+
+/**
+ * struct hfi_cmd_ipe_bps_map
+ * @user_data : user arg
+ * @mem_map_request_num: number of mappings
+ * @mem_map_region_sets: array of all map/unmap info
+ *
+ * create hfi_cmd_ipe_bps_map
+ */
+struct hfi_cmd_ipe_bps_map {
+ uint64_t user_data;
+ uint32_t mem_map_request_num;
+ struct mem_map_region_data mem_map_region_sets[1];
+} __packed;
+
+/**
+ * struct hfi_cmd_ipe_bps_map_ack
+ * @rc : Async return code
+ * @user_data: user_arg
+ *
+ * create hfi_cmd_ipe_bps_map_ack
+ */
+struct hfi_cmd_ipe_bps_map_ack {
+ uint32_t rc;
+ uint64_t user_data;
+};
+
+/**
+ * struct abort_data
+ * @num_req_ids: Number of req ids
+ * @num_req_id: point to specific req id
+ *
+ * create abort data
+ */
+struct abort_data {
+ uint32_t num_req_ids;
+ uint32_t num_req_id[1];
+};
+
+/**
+ * struct hfi_cmd_abort
+ * @abort: abort data
+ * @user_data: user supplied argument
+ *
+ * create session abort data
+ */
+struct hfi_cmd_abort {
+	struct abort_data abort;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_abort_destroy
+ * @user_data: user supplied data
+ *
+ * IPE/BPS destroy/abort command
+ * @HFI_IPEBPS_CMD_OPCODE_IPE_ABORT
+ * @HFI_IPEBPS_CMD_OPCODE_BPS_ABORT
+ * @HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY
+ * @HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY
+ */
+struct hfi_cmd_abort_destroy {
+ uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_chaining_ops
+ * @wait_hdl: current session handle waits on wait_hdl to complete operation
+ * @user_data: user supplied argument
+ *
+ * this structure for chaining opcodes
+ * BPS_WAITS_FOR_IPE
+ * BPS_WAITS_FOR_BPS
+ * IPE_WAITS_FOR_BPS
+ * IPE_WAITS_FOR_IPE
+ */
+struct hfi_cmd_chaining_ops {
+ uint32_t wait_hdl;
+ uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_create_handle
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @handle_type: IPE/BPS firmware session handle type
+ * @user_data1: caller provided data1
+ * @user_data2: caller provided data2
+ *
+ * create firmware session handle
+ */
+struct hfi_cmd_create_handle {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint32_t handle_type;
+ uint64_t user_data1;
+ uint64_t user_data2;
+} __packed;
+
+/**
+ * struct hfi_cmd_ipebps_async
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @opcode: opcode for IPE/BPS async operation
+ * CONFIG_IO: configures I/O for IPE/BPS handle
+ * FRAME_PROCESS: image frame to be processed by IPE/BPS
+ * ABORT: abort all processing frames of IPE/BPS handle
+ * DESTROY: destroy earlier created IPE/BPS handle
+ * BPS_WAITS_FOR_IPE: sync for BPS to wait for IPE
+ * BPS_WAITS_FOR_BPS: sync for BPS to wait for BPS
+ * IPE_WAITS_FOR_IPE: sync for IPE to wait for IPE
+ * IPE_WAITS_FOR_BPS: sync for IPE to wait for BPS
+ * @num_fw_handles: number of IPE/BPS firmware handles in fw_handles array
+ * @fw_handles: IPE/BPS handles array
+ * @payload: command payload for IPE/BPS opcodes
+ * @direct: points to actual payload
+ * @indirect: points to address of payload
+ *
+ * sends async command to the earlier created IPE or BPS handle
+ * for asynchronous operation.
+ */
+struct hfi_cmd_ipebps_async {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint32_t opcode;
+ uint64_t user_data1;
+ uint64_t user_data2;
+ uint32_t num_fw_handles;
+ uint32_t fw_handles[1];
+ union {
+ uint32_t direct[1];
+ uint32_t indirect;
+ } payload;
+} __packed;
+
+/**
+ * struct hfi_msg_create_handle_ack
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @err_type: error code
+ * @fw_handle: output param for IPE/BPS handle
+ * @user_data1: user provided data1
+ * @user_data2: user provided data2
+ *
+ * ack for create handle command of IPE/BPS
+ * @HFI_MSG_IPEBPS_CREATE_HANDLE_ACK
+ */
+struct hfi_msg_create_handle_ack {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint32_t err_type;
+ uint32_t fw_handle;
+ uint64_t user_data1;
+ uint64_t user_data2;
+} __packed;
+
+/**
+ * struct hfi_msg_ipebps_async_ack
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @opcode: opcode of IPE/BPS async operation
+ * @user_data1: user provided data1
+ * @user_data2: user provided data2
+ * @err_type: error code
+ * @msg_data: IPE/BPS async done message data
+ *
+ * result of IPE/BPS async command
+ * @HFI_MSG_IPEBPS_ASYNC_COMMAND_ACK
+ */
+struct hfi_msg_ipebps_async_ack {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t opcode;
+	uint64_t user_data1;
+	uint64_t user_data2;
+	uint32_t err_type;
+	uint32_t msg_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_frame_process_done
+ * @result: result of frame process command
+ * @scratch_buffer_address: address of scratch buffer
+ */
+struct hfi_msg_frame_process_done {
+ uint32_t result;
+ uint32_t scratch_buffer_address;
+};
+
+/**
+ * struct hfi_msg_chaining_op
+ * @status: return status
+ * @user_data: user data provided as part of chaining ops
+ *
+ * IPE/BPS wait response
+ */
+struct hfi_msg_chaining_op {
+ uint32_t status;
+ uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_abort_destroy
+ * @status: return status
+ * @user_data: user data provided as part of abort/destroy ops
+ *
+ * IPE/BPS abort/destroy response
+ */
+struct hfi_msg_abort_destroy {
+ uint32_t status;
+ uint64_t user_data;
+} __packed;
+
+#define MAX_NUM_OF_IMAGE_PLANES 2
+#define MAX_HFR_GROUP 16
+
+enum hfi_ipe_io_images {
+ IPE_INPUT_IMAGE_FULL,
+ IPE_INPUT_IMAGE_DS4,
+ IPE_INPUT_IMAGE_DS16,
+ IPE_INPUT_IMAGE_DS64,
+ IPE_INPUT_IMAGE_FULL_REF,
+ IPE_INPUT_IMAGE_DS4_REF,
+ IPE_INPUT_IMAGE_DS16_REF,
+ IPE_INPUT_IMAGE_DS64_REF,
+ IPE_OUTPUT_IMAGE_DISPLAY,
+ IPE_OUTPUT_IMAGE_VIDEO,
+ IPE_OUTPUT_IMAGE_FULL_REF,
+ IPE_OUTPUT_IMAGE_DS4_REF,
+ IPE_OUTPUT_IMAGE_DS16_REF,
+ IPE_OUTPUT_IMAGE_DS64_REF,
+ IPE_INPUT_IMAGE_FIRST = IPE_INPUT_IMAGE_FULL,
+ IPE_INPUT_IMAGE_LAST = IPE_INPUT_IMAGE_DS64_REF,
+ IPE_OUTPUT_IMAGE_FIRST = IPE_OUTPUT_IMAGE_DISPLAY,
+ IPE_OUTPUT_IMAGE_LAST = IPE_OUTPUT_IMAGE_DS64_REF,
+ IPE_IO_IMAGES_MAX
+};
+
+enum bps_io_images {
+ BPS_INPUT_IMAGE,
+ BPS_OUTPUT_IMAGE_FULL,
+ BPS_OUTPUT_IMAGE_DS4,
+ BPS_OUTPUT_IMAGE_DS16,
+ BPS_OUTPUT_IMAGE_DS64,
+ BPS_OUTPUT_IMAGE_STATS_BG,
+ BPS_OUTPUT_IMAGE_STATS_BHIST,
+ BPS_OUTPUT_IMAGE_REG1,
+ BPS_OUTPUT_IMAGE_REG2,
+ BPS_OUTPUT_IMAGE_FIRST = BPS_OUTPUT_IMAGE_FULL,
+ BPS_OUTPUT_IMAGE_LAST = BPS_OUTPUT_IMAGE_REG2,
+ BPS_IO_IMAGES_MAX
+};
+
+struct frame_buffer {
+ uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+ uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+struct bps_frame_process_data {
+ struct frame_buffer buffers[BPS_IO_IMAGES_MAX];
+ uint32_t max_num_cores;
+ uint32_t target_time;
+ uint32_t ubwc_stats_buffer_addr;
+ uint32_t ubwc_stats_buffer_size;
+ uint32_t cdm_buffer_addr;
+ uint32_t cdm_buffer_size;
+ uint32_t iq_settings_addr;
+ uint32_t strip_lib_out_addr;
+ uint32_t cdm_prog_addr;
+ uint32_t request_id;
+};
+
+enum hfi_ipe_image_format {
+ IMAGE_FORMAT_INVALID,
+ IMAGE_FORMAT_MIPI_8,
+ IMAGE_FORMAT_MIPI_10,
+ IMAGE_FORMAT_MIPI_12,
+ IMAGE_FORMAT_MIPI_14,
+ IMAGE_FORMAT_BAYER_8,
+ IMAGE_FORMAT_BAYER_10,
+ IMAGE_FORMAT_BAYER_12,
+ IMAGE_FORMAT_BAYER_14,
+ IMAGE_FORMAT_PDI_10,
+ IMAGE_FORMAT_PD_10,
+ IMAGE_FORMAT_PD_8,
+ IMAGE_FORMAT_INDICATIONS,
+ IMAGE_FORMAT_REFINEMENT,
+ IMAGE_FORMAT_UBWC_TP_10,
+ IMAGE_FORMAT_UBWC_NV_12,
+ IMAGE_FORMAT_UBWC_NV12_4R,
+ IMAGE_FORMAT_UBWC_P010,
+ IMAGE_FORMAT_LINEAR_TP_10,
+ IMAGE_FORMAT_LINEAR_P010,
+ IMAGE_FORMAT_LINEAR_NV12,
+ IMAGE_FORMAT_LINEAR_PLAIN_16,
+ IMAGE_FORMAT_YUV422_8,
+ IMAGE_FORMAT_YUV422_10,
+ IMAGE_FORMAT_STATISTICS_BAYER_GRID,
+ IMAGE_FORMAT_STATISTICS_BAYER_HISTOGRAM,
+ IMAGE_FORMAT_MAX
+};
+
+enum hfi_ipe_plane_format {
+ PLANE_FORMAT_INVALID = 0,
+ PLANE_FORMAT_MIPI_8,
+ PLANE_FORMAT_MIPI_10,
+ PLANE_FORMAT_MIPI_12,
+ PLANE_FORMAT_MIPI_14,
+ PLANE_FORMAT_BAYER_8,
+ PLANE_FORMAT_BAYER_10,
+ PLANE_FORMAT_BAYER_12,
+ PLANE_FORMAT_BAYER_14,
+ PLANE_FORMAT_PDI_10,
+ PLANE_FORMAT_PD_10,
+ PLANE_FORMAT_PD_8,
+ PLANE_FORMAT_INDICATIONS,
+ PLANE_FORMAT_REFINEMENT,
+ PLANE_FORMAT_UBWC_TP_10_Y,
+ PLANE_FORMAT_UBWC_TP_10_C,
+ PLANE_FORMAT_UBWC_NV_12_Y,
+ PLANE_FORMAT_UBWC_NV_12_C,
+ PLANE_FORMAT_UBWC_NV_12_4R_Y,
+ PLANE_FORMAT_UBWC_NV_12_4R_C,
+ PLANE_FORMAT_UBWC_P010_Y,
+ PLANE_FORMAT_UBWC_P010_C,
+ PLANE_FORMAT_LINEAR_TP_10_Y,
+ PLANE_FORMAT_LINEAR_TP_10_C,
+ PLANE_FORMAT_LINEAR_P010_Y,
+ PLANE_FORMAT_LINEAR_P010_C,
+ PLANE_FORMAT_LINEAR_NV12_Y,
+ PLANE_FORMAT_LINEAR_NV12_C,
+ PLANE_FORMAT_LINEAR_PLAIN_16_Y,
+ PLANE_FORMAT_LINEAR_PLAIN_16_C,
+ PLANE_FORMAT_YUV422_8,
+ PLANE_FORMAT_YUV422_10,
+ PLANE_FORMAT_STATISTICS_BAYER_GRID,
+ PLANE_FORMAT_STATISTICS_BAYER_HISTOGRAM,
+ PLANE_FORMAT_MAX
+};
+
+enum hfi_ipe_bayer_pixel_order {
+ FIRST_PIXEL_R,
+ FIRST_PIXEL_GR,
+ FIRST_PIXEL_B,
+ FIRST_PIXEL_GB,
+ FIRST_PIXEL_MAX
+};
+
+enum hfi_ipe_pixel_pack_alignment {
+ PIXEL_LSB_ALIGNED,
+ PIXEL_MSB_ALIGNED,
+};
+
+enum hfi_ipe_yuv_422_order {
+ PIXEL_ORDER_Y_U_Y_V,
+ PIXEL_ORDER_Y_V_Y_U,
+ PIXEL_ORDER_U_Y_V_Y,
+ PIXEL_ORDER_V_Y_U_Y,
+ PIXEL_ORDER_YUV422_MAX
+};
+
+enum ubwc_write_client {
+ IPE_WR_CLIENT_0 = 0,
+ IPE_WR_CLIENT_1,
+ IPE_WR_CLIENT_5,
+ IPE_WR_CLIENT_6,
+ IPE_WR_CLIENT_7,
+ IPE_WR_CLIENT_8,
+ IPE_WR_CLIENT_MAX
+};
+
+/**
+ * struct image_info
+ * @format: image format
+ * @img_width: image width
+ * @img_height: image height
+ * @bayer_order: pixel order
+ * @pix_align: alignment
+ * @yuv422_order: YUV order
+ * @byte_swap: byte swap
+ */
+struct image_info {
+ enum hfi_ipe_image_format format;
+ uint32_t img_width;
+ uint32_t img_height;
+ enum hfi_ipe_bayer_pixel_order bayer_order;
+ enum hfi_ipe_pixel_pack_alignment pix_align;
+ enum hfi_ipe_yuv_422_order yuv422_order;
+ uint32_t byte_swap;
+} __packed;
+
+/**
+ * struct buffer_layout
+ * @buf_stride: buffer stride
+ * @buf_height: buffer height
+ */
+struct buffer_layout {
+ uint32_t buf_stride;
+ uint32_t buf_height;
+} __packed;
+
+/**
+ * struct image_desc
+ * @info: image info
+ * @buf_layout: buffer layout
+ * @meta_buf_layout: meta buffer layout
+ */
+struct image_desc {
+ struct image_info info;
+ struct buffer_layout buf_layout[MAX_NUM_OF_IMAGE_PLANES];
+ struct buffer_layout meta_buf_layout[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+struct ica_stab_coeff {
+ uint32_t coeffs[8];
+} __packed;
+
+struct ica_stab_params {
+ uint32_t mode;
+ struct ica_stab_coeff transforms[3];
+} __packed;
+
+struct frame_set {
+ struct frame_buffer buffers[IPE_IO_IMAGES_MAX];
+ struct ica_stab_params ica_params;
+ uint32_t cdm_ica1_addr;
+ uint32_t cdm_ica2_addr;
+} __packed;
+
+struct ipe_frame_process_data {
+ uint32_t strip_lib_out_addr;
+ uint32_t iq_settings_addr;
+ uint32_t scratch_buffer_addr;
+ uint32_t scratch_buffer_size;
+ uint32_t ubwc_stats_buffer_addr;
+ uint32_t ubwc_stats_buffer_size;
+ uint32_t cdm_buffer_addr;
+ uint32_t cdm_buffer_size;
+ uint32_t max_num_cores;
+ uint32_t target_time;
+ uint32_t cdm_prog_base;
+ uint32_t cdm_pre_ltm;
+ uint32_t cdm_post_ltm;
+ uint32_t cdm_anr_full_pass;
+ uint32_t cdm_anr_ds4;
+ uint32_t cdm_anr_ds16;
+ uint32_t cdm_anr_ds64;
+ uint32_t cdm_tf_full_pass;
+ uint32_t cdm_tf_ds4;
+ uint32_t cdm_tf_ds16;
+ uint32_t cdm_tf_ds64;
+ uint32_t request_id;
+ uint32_t frames_in_batch;
+ struct frame_set framesets[MAX_HFR_GROUP];
+} __packed;
+
+/**
+ * struct hfi_cmd_ipe_config
+ * @images: images descriptions
+ * @user_data: user supplied data
+ *
+ * payload for IPE async command
+ */
+struct hfi_cmd_ipe_config {
+ struct image_desc images[IPE_IO_IMAGES_MAX];
+ uint64_t user_data;
+} __packed;
+
+/**
+ * struct frame_buffers
+ * @buf_ptr: buffer pointers for all planes
+ * @meta_buf_ptr: meta buffer pointers for all planes
+ */
+struct frame_buffers {
+ uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+ uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+/**
+ * struct hfi_msg_ipe_config
+ * @rc: result of ipe config command
+ * @scratch_mem_size: scratch mem size for a config
+ * @user_data: user data
+ */
+struct hfi_msg_ipe_config {
+ uint32_t rc;
+ uint32_t scratch_mem_size;
+ uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_bps_common
+ * @rc: result of ipe config command
+ * @user_data: user data
+ */
+struct hfi_msg_bps_common {
+ uint32_t rc;
+ uint64_t user_data;
+} __packed;
+
+/**
+ * struct ipe_bps_destroy
+ * @userdata: user data
+ */
+struct ipe_bps_destroy {
+ uint64_t userdata;
+};
+
+/**
+ * struct hfi_msg_ipe_frame_process
+ * @status: result of ipe frame process command
+ * @scratch_buf_addr: address of scratch buffer
+ * @user_data: user data
+ */
+struct hfi_msg_ipe_frame_process {
+ uint32_t status;
+ uint32_t scratch_buf_addr;
+ uint64_t user_data;
+} __packed;
+
+#endif /* _CAM_HFI_SESSION_DEFS_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_sys_defs.h b/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_sys_defs.h
new file mode 100644
index 000000000000..311886ffd6da
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/fw_inc/hfi_sys_defs.h
@@ -0,0 +1,535 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _HFI_DEFS_H_
+#define _HFI_DEFS_H_
+
+#include <linux/types.h>
+
+/*
+ * Following base acts as common starting points
+ * for all enumerations.
+ */
+#define HFI_COMMON_BASE 0x0
+
+/* HFI Domain base offset for commands and messages */
+#define HFI_DOMAIN_SHFT (24)
+#define HFI_DOMAIN_BMSK (0x7 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_ICP (0x0 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_IPE_BPS (0x1 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_CDM (0x2 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_DBG (0x3 << HFI_DOMAIN_SHFT)
+
+/* Command base offset for commands */
+#define HFI_CMD_START_OFFSET 0x10000
+
+/* Command base offset for messages */
+#define HFI_MSG_START_OFFSET 0x20000
+
+/* System Level Error types */
+#define HFI_ERR_SYS_NONE (HFI_COMMON_BASE)
+#define HFI_ERR_SYS_FATAL (HFI_COMMON_BASE + 0x1)
+#define HFI_ERR_SYS_VERSION_MISMATCH (HFI_COMMON_BASE + 0x2)
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN (HFI_COMMON_BASE + 0x3)
+#define HFI_ERR_SYS_UNSUPPORT_CMD (HFI_COMMON_BASE + 0x4)
+#define HFI_ERR_SYS_CMDFAILED (HFI_COMMON_BASE + 0x5)
+#define HFI_ERR_SYS_CMDSIZE (HFI_COMMON_BASE + 0x6)
+
+/* System Level Event types */
+#define HFI_EVENT_SYS_ERROR (HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_ICP_ERROR (HFI_COMMON_BASE + 0x2)
+#define HFI_EVENT_IPE_BPS_ERROR (HFI_COMMON_BASE + 0x3)
+#define HFI_EVENT_CDM_ERROR (HFI_COMMON_BASE + 0x4)
+#define HFI_EVENT_DBG_ERROR (HFI_COMMON_BASE + 0x5)
+
+/* Core level start Ranges for errors */
+#define HFI_ERR_ICP_START (HFI_COMMON_BASE + 0x64)
+#define HFI_ERR_IPE_BPS_START (HFI_ERR_ICP_START + 0x64)
+#define HFI_ERR_CDM_START (HFI_ERR_IPE_BPS_START + 0x64)
+#define HFI_ERR_DBG_START (HFI_ERR_CDM_START + 0x64)
+
+/*ICP Core level error messages */
+#define HFI_ERR_NO_RES (HFI_ERR_ICP_START + 0x1)
+#define HFI_ERR_UNSUPPORTED_RES (HFI_ERR_ICP_START + 0x2)
+#define HFI_ERR_UNSUPPORTED_PROP (HFI_ERR_ICP_START + 0x3)
+#define HFI_ERR_INIT_EXPECTED (HFI_ERR_ICP_START + 0x4)
+#define HFI_ERR_INIT_IGNORED (HFI_ERR_ICP_START + 0x5)
+
+/* System level commands */
+#define HFI_CMD_COMMON_START \
+ (HFI_DOMAIN_BASE_ICP + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_SYS_INIT (HFI_CMD_COMMON_START + 0x1)
+#define HFI_CMD_SYS_PC_PREP (HFI_CMD_COMMON_START + 0x2)
+#define HFI_CMD_SYS_SET_PROPERTY (HFI_CMD_COMMON_START + 0x3)
+#define HFI_CMD_SYS_GET_PROPERTY (HFI_CMD_COMMON_START + 0x4)
+#define HFI_CMD_SYS_PING (HFI_CMD_COMMON_START + 0x5)
+#define HFI_CMD_SYS_RESET (HFI_CMD_COMMON_START + 0x6)
+
+/* Core level commands */
+/* IPE/BPS core Commands */
+#define HFI_CMD_IPE_BPS_COMMON_START \
+ (HFI_DOMAIN_BASE_IPE_BPS + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_IPEBPS_CREATE_HANDLE \
+ (HFI_CMD_IPE_BPS_COMMON_START + 0x8)
+#define HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT \
+ (HFI_CMD_IPE_BPS_COMMON_START + 0xa)
+#define HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT \
+ (HFI_CMD_IPE_BPS_COMMON_START + 0xe)
+
+/* CDM core Commands */
+#define HFI_CMD_CDM_COMMON_START \
+ (HFI_DOMAIN_BASE_CDM + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_CDM_TEST_START (HFI_CMD_CDM_COMMON_START + 0x800)
+#define HFI_CMD_CDM_END (HFI_CMD_CDM_COMMON_START + 0xFFF)
+
+/* Debug/Test Commands */
+#define HFI_CMD_DBG_COMMON_START \
+ (HFI_DOMAIN_BASE_DBG + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_DBG_TEST_START (HFI_CMD_DBG_COMMON_START + 0x800)
+#define HFI_CMD_DBG_END (HFI_CMD_DBG_COMMON_START + 0xFFF)
+
+/* System level messages */
+#define HFI_MSG_ICP_COMMON_START \
+ (HFI_DOMAIN_BASE_ICP + HFI_MSG_START_OFFSET + 0x0)
+#define HFI_MSG_SYS_INIT_DONE (HFI_MSG_ICP_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE (HFI_MSG_ICP_COMMON_START + 0x2)
+#define HFI_MSG_SYS_DEBUG (HFI_MSG_ICP_COMMON_START + 0x3)
+#define HFI_MSG_SYS_IDLE (HFI_MSG_ICP_COMMON_START + 0x4)
+#define HFI_MSG_SYS_PROPERTY_INFO (HFI_MSG_ICP_COMMON_START + 0x5)
+#define HFI_MSG_SYS_PING_ACK (HFI_MSG_ICP_COMMON_START + 0x6)
+#define HFI_MSG_SYS_RESET_ACK (HFI_MSG_ICP_COMMON_START + 0x7)
+#define HFI_MSG_EVENT_NOTIFY (HFI_MSG_ICP_COMMON_START + 0x8)
+
+/* Core level Messages */
+/* IPE/BPS core Messages */
+#define HFI_MSG_IPE_BPS_COMMON_START \
+ (HFI_DOMAIN_BASE_IPE_BPS + HFI_MSG_START_OFFSET + 0x0)
+#define HFI_MSG_IPEBPS_CREATE_HANDLE_ACK \
+ (HFI_MSG_IPE_BPS_COMMON_START + 0x08)
+#define HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK \
+ (HFI_MSG_IPE_BPS_COMMON_START + 0x0a)
+#define HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK \
+ (HFI_MSG_IPE_BPS_COMMON_START + 0x0e)
+#define HFI_MSG_IPE_BPS_TEST_START \
+ (HFI_MSG_IPE_BPS_COMMON_START + 0x800)
+#define HFI_MSG_IPE_BPS_END \
+ (HFI_MSG_IPE_BPS_COMMON_START + 0xFFF)
+
+/* CDM core Messages */
+#define HFI_MSG_CDM_COMMON_START \
+ (HFI_DOMAIN_BASE_CDM + HFI_MSG_START_OFFSET + 0x0)
+#define HFI_MSG_PRI_CDM_PAYLOAD_ACK (HFI_MSG_CDM_COMMON_START + 0xa)
+#define HFI_MSG_PRI_LLD_PAYLOAD_ACK (HFI_MSG_CDM_COMMON_START + 0xb)
+#define HFI_MSG_CDM_TEST_START (HFI_MSG_CDM_COMMON_START + 0x800)
+#define HFI_MSG_CDM_END (HFI_MSG_CDM_COMMON_START + 0xFFF)
+
+/* core level test command ranges */
+/* ICP core level test command range */
+#define HFI_CMD_ICP_TEST_START (HFI_CMD_ICP_COMMON_START + 0x800)
+#define HFI_CMD_ICP_END (HFI_CMD_ICP_COMMON_START + 0xFFF)
+
+/* IPE/BPS core level test command range */
+#define HFI_CMD_IPE_BPS_TEST_START \
+ (HFI_CMD_IPE_BPS_COMMON_START + 0x800)
+#define HFI_CMD_IPE_BPS_END (HFI_CMD_IPE_BPS_COMMON_START + 0xFFF)
+
+/* ICP core level test message range */
+#define HFI_MSG_ICP_TEST_START (HFI_MSG_ICP_COMMON_START + 0x800)
+#define HFI_MSG_ICP_END (HFI_MSG_ICP_COMMON_START + 0xFFF)
+
+/* ICP core level Debug test message range */
+#define HFI_MSG_DBG_COMMON_START \
+ (HFI_DOMAIN_BASE_DBG + 0x0)
+#define HFI_MSG_DBG_TEST_START (HFI_MSG_DBG_COMMON_START + 0x800)
+#define HFI_MSG_DBG_END (HFI_MSG_DBG_COMMON_START + 0xFFF)
+
+/* System level property base offset */
+#define HFI_PROPERTY_ICP_COMMON_START (HFI_DOMAIN_BASE_ICP + 0x0)
+
+#define HFI_PROP_SYS_DEBUG_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x1)
+#define HFI_PROP_SYS_UBWC_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x2)
+#define HFI_PROP_SYS_IMAGE_VER (HFI_PROPERTY_ICP_COMMON_START + 0x3)
+#define HFI_PROP_SYS_SUPPORTED (HFI_PROPERTY_ICP_COMMON_START + 0x4)
+#define HFI_PROP_SYS_IPEBPS_PC (HFI_PROPERTY_ICP_COMMON_START + 0x5)
+#define HFI_PROP_SYS_FW_DUMP_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x8)
+
+/* Capabilities reported at sys init */
+#define HFI_CAPS_PLACEHOLDER_1 (HFI_COMMON_BASE + 0x1)
+#define HFI_CAPS_PLACEHOLDER_2 (HFI_COMMON_BASE + 0x2)
+
+/* Section describes different debug levels (HFI_DEBUG_MSG_X)
+ * available for debug messages from FW
+ */
+#define HFI_DEBUG_MSG_LOW 0x00000001
+#define HFI_DEBUG_MSG_MEDIUM 0x00000002
+#define HFI_DEBUG_MSG_HIGH 0x00000004
+#define HFI_DEBUG_MSG_ERROR 0x00000008
+#define HFI_DEBUG_MSG_FATAL 0x00000010
+/* Messages containing performance data */
+#define HFI_DEBUG_MSG_PERF 0x00000020
+/* Disable ARM9 WFI in low power mode. */
+#define HFI_DEBUG_CFG_WFI 0x01000000
+/* Disable ARM9 watchdog. */
+#define HFI_DEBUG_CFG_ARM9WD 0x10000000
+
+
+/*
+ * HFI_FW_DUMP levels
+ * HFI_FW_DUMP_xx
+ */
+#define HFI_FW_DUMP_DISABLED 0x00000000
+#define HFI_FW_DUMP_ON_FAILURE 0x00000001
+#define HFI_FW_DUMP_ALWAYS 0x00000002
+
+/* Number of available dump levels. */
+#define NUM_HFI_DUMP_LVL 0x00000003
+
+/* Debug Msg Communication types:
+ * Section describes different modes (HFI_DEBUG_MODE_X)
+ * available to communicate the debug messages
+ */
+ /* Debug message output through the interface debug queue. */
+#define HFI_DEBUG_MODE_QUEUE 0x00000001
+ /* Debug message output through QDSS. */
+#define HFI_DEBUG_MODE_QDSS 0x00000002
+ /* Number of debug modes available. */
+#define NUM_HFI_DEBUG_MODE 0x00000002
+
+#define HFI_DEBUG_MSG_LOW 0x00000001
+#define HFI_DEBUG_MSG_MEDIUM 0x00000002
+#define HFI_DEBUG_MSG_HIGH 0x00000004
+#define HFI_DEBUG_MSG_ERROR 0x00000008
+#define HFI_DEBUG_MSG_FATAL 0x00000010
+#define HFI_DEBUG_MSG_PERF 0x00000020
+#define HFI_DEBUG_CFG_WFI 0x01000000
+#define HFI_DEBUG_CFG_ARM9WD 0x10000000
+
+#define HFI_DEV_VERSION_MAX 0x5
+
+/**
+ * start of sys command packet types
+ * These commands are used to get system level information
+ * from firmware
+ */
+
+/**
+ * struct hfi_caps_support
+ * payload to report caps through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED
+ * @type: capability type
+ * @min: minimum supported value for the capability
+ * @max: maximum supported value for the capability
+ * @step_size: supported steps between min-max
+ */
+struct hfi_caps_support {
+ uint32_t type;
+ uint32_t min;
+ uint32_t max;
+ uint32_t step_size;
+} __packed;
+
+/**
+ * struct hfi_caps_support_info
+ * capability report through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED
+ * @num_caps: number of capabilities listed
+ * @caps_data: capabilities info array
+ */
+struct hfi_caps_support_info {
+ uint32_t num_caps;
+ struct hfi_caps_support caps_data[1];
+} __packed;
+
+/**
+ * struct hfi_debug
+ * payload structure to configure HFI_PROPERTY_SYS_DEBUG_CONFIG
+ * @debug_config: it is a result of HFI_DEBUG_MSG_X values that
+ * are OR-ed together to specify the debug message types
+ * to output
+ * @debug_mode: debug message output through debug queue/qdss
+ * @HFI_PROPERTY_SYS_DEBUG_CONFIG
+ */
+struct hfi_debug {
+ uint32_t debug_config;
+ uint32_t debug_mode;
+} __packed;
+
+/**
+ * struct hfi_ipe_bps_pc
+ * payload structure to configure HFI_PROPERTY_SYS_IPEBPS_PC
+ * @enable: Flag to enable IPE, BPS interframe power collapse
+ * @core_info: Core information to firmware
+ */
+struct hfi_ipe_bps_pc {
+ uint32_t enable;
+ uint32_t core_info;
+} __packed;
+
+/**
+ * struct hfi_cmd_ubwc_cfg
+ * Payload structure to configure HFI_PROP_SYS_UBWC_CFG
+ * @ubwc_fetch_cfg: UBWC configuration for fetch
+ * @ubwc_write_cfg: UBWC configuration for write
+ */
+struct hfi_cmd_ubwc_cfg {
+ uint32_t ubwc_fetch_cfg;
+ uint32_t ubwc_write_cfg;
+};
+
+/**
+ * struct hfi_cmd_sys_init
+ * command to initialization of system session
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @HFI_CMD_SYS_INIT
+ */
+struct hfi_cmd_sys_init {
+ uint32_t size;
+ uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_cmd_pc_prep
+ * command to firmware to prepare for power collapse
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @HFI_CMD_SYS_PC_PREP
+ */
+struct hfi_cmd_pc_prep {
+ uint32_t size;
+ uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_cmd_prop
+ * command to get/set properties of firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @num_prop: number of properties queried/set
+ * @prop_data: array of property IDs being queried. size depends on num_prop
+ * array of property IDs and associated structure pairs in set
+ * @HFI_CMD_SYS_GET_PROPERTY
+ * @HFI_CMD_SYS_SET_PROPERTY
+ */
+struct hfi_cmd_prop {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint32_t num_prop;
+ uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_cmd_ping_pkt
+ * ping command pings the firmware to confirm whether
+ * it is alive.
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: client data, firmware returns this data
+ * as part of HFI_MSG_SYS_PING_ACK
+ * @HFI_CMD_SYS_PING
+ */
+struct hfi_cmd_ping_pkt {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_sys_reset_pkt
+ * sends the reset command to FW. FW responds in the same type
+ * of packet. so can be used for reset_ack_pkt type also
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: client data, firmware returns this data
+ * as part of HFI_MSG_SYS_RESET_ACK
+ * @HFI_CMD_SYS_RESET
+ */
+
+struct hfi_cmd_sys_reset_pkt {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint64_t user_data;
+} __packed;
+
+/* end of sys command packet types */
+
+/* start of sys message packet types */
+
+/**
+ * struct hfi_prop
+ * structure to report maximum supported features of firmware.
+ */
+struct hfi_sys_support {
+ uint32_t place_holder;
+} __packed;
+
+/**
+ * struct hfi_supported_prop
+ * structure to report HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED
+ * for a session
+ * @num_prop: number of properties supported
+ * @prop_data: array of supported property IDs
+ */
+struct hfi_supported_prop {
+ uint32_t num_prop;
+ uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_image_version
+ * system image version
+ * @major: major version number
+ * @minor: minor version number
+ * @ver_name_size: size of version name
+ * @ver_name: image version name
+ */
+struct hfi_image_version {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t ver_name_size;
+ uint8_t ver_name[1];
+} __packed;
+
+/**
+ * struct hfi_msg_init_done_data
+ * @api_ver: Firmware API version
+ * @dev_ver: Device version
+ * @num_icp_hw: Number of ICP hardware information
+ * @dev_hw_ver: Supported hardware version information
+ * @reserved: Reserved field
+ */
+struct hfi_msg_init_done_data {
+ uint32_t api_ver;
+ uint32_t dev_ver;
+ uint32_t num_icp_hw;
+ uint32_t dev_hw_ver[HFI_DEV_VERSION_MAX];
+ uint32_t reserved;
+};
+
+/**
+ * struct hfi_msg_init_done
+ * system init done message from firmware. Many system level properties
+ * are returned with the packet
+ * @size: Packet size in bytes
+ * @pkt_type: Opcode of a packet
+ * @err_type: Error code associated with response
+ * @num_prop: Number of default capability info
+ * @prop_data: Array of property ids and corresponding structure pairs
+ */
+struct hfi_msg_init_done {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint32_t err_type;
+ uint32_t num_prop;
+ uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_pc_prep_done
+ * system power collapse preparation done message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @err_type: error code associated with the response
+ */
+struct hfi_msg_pc_prep_done {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint32_t err_type;
+} __packed;
+
+/**
+ * struct hfi_msg_prop
+ * system property info from firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @num_prop: number of property info structures
+ * @prop_data: array of property IDs and associated structure pairs
+ */
+struct hfi_msg_prop {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint32_t num_prop;
+ uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_idle
+ * system idle message from firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ */
+struct hfi_msg_idle {
+ uint32_t size;
+ uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_msg_ping_ack
+ * system ping ack message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: this data is sent as part of ping command from host
+ */
+struct hfi_msg_ping_ack {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_debug
+ * system debug message definition
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @msg_type: debug message type
+ * @msg_size: size of debug message in bytes
+ * @timestamp_hi: most significant 32 bits of the 64 bit timestamp field.
+ * timestamp shall be interpreted as a signed 64-bit value
+ * representing microseconds.
+ * @timestamp_lo: least significant 32 bits of the 64 bit timestamp field.
+ * timestamp shall be interpreted as a signed 64-bit value
+ * representing microseconds.
+ * @msg_data: message data in string form
+ */
+struct hfi_msg_debug {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint32_t msg_type;
+ uint32_t msg_size;
+ uint32_t timestamp_hi;
+ uint32_t timestamp_lo;
+ uint8_t msg_data[1];
+} __packed;
+/**
+ * struct hfi_msg_event_notify
+ * event notify message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @fw_handle: firmware session handle
+ * @event_id: session event id
+ * @event_data1: event data corresponding to event ID
+ * @event_data2: event data corresponding to event ID
+ * @ext_event_data: info array, interpreted based on event_data1
+ * and event_data2
+ */
+struct hfi_msg_event_notify {
+ uint32_t size;
+ uint32_t pkt_type;
+ uint32_t fw_handle;
+ uint32_t event_id;
+ uint32_t event_data1;
+ uint32_t event_data2;
+ uint32_t ext_event_data[1];
+} __packed;
+/**
+ * end of sys message packet types
+ */
+
+#endif /* _HFI_DEFS_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/hfi.c b/drivers/media/platform/msm/camera_v3/cam_icp/hfi.c
new file mode 100644
index 000000000000..da1bf9e52ce8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/hfi.c
@@ -0,0 +1,897 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/errno.h>
+#include <linux/timer.h>
+#include <media/cam_icp.h>
+#include <linux/iopoll.h>
+#include <soc/qcom/socinfo.h>
+
+#include "cam_io_util.h"
+#include "hfi_reg.h"
+#include "hfi_sys_defs.h"
+#include "hfi_session_defs.h"
+#include "hfi_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+#include "cam_soc_util.h"
+
+#define HFI_VERSION_INFO_MAJOR_VAL 1
+#define HFI_VERSION_INFO_MINOR_VAL 1
+#define HFI_VERSION_INFO_STEP_VAL 0
+#define HFI_VERSION_INFO_STEP_VAL 0
+#define HFI_VERSION_INFO_MAJOR_BMSK 0xFF000000
+#define HFI_VERSION_INFO_MAJOR_SHFT 24
+#define HFI_VERSION_INFO_MINOR_BMSK 0xFFFF00
+#define HFI_VERSION_INFO_MINOR_SHFT 8
+#define HFI_VERSION_INFO_STEP_BMSK 0xFF
+#define HFI_VERSION_INFO_STEP_SHFT 0
+
+#define HFI_MAX_POLL_TRY 5
+
+static struct hfi_info *g_hfi;
+unsigned int g_icp_mmu_hdl;
+static DEFINE_MUTEX(hfi_cmd_q_mutex);
+static DEFINE_MUTEX(hfi_msg_q_mutex);
+
+void cam_hfi_queue_dump(void)
+{
+ struct hfi_qtbl *qtbl;
+ struct hfi_qtbl_hdr *qtbl_hdr;
+ struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr;
+ struct hfi_mem_info *hfi_mem = NULL;
+ uint32_t *read_q, *read_ptr;
+ int i;
+
+ hfi_mem = &g_hfi->map;
+ if (!hfi_mem) {
+ CAM_ERR(CAM_HFI, "Unable to dump queues hfi memory is NULL");
+ return;
+ }
+
+ qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
+ qtbl_hdr = &qtbl->q_tbl_hdr;
+ CAM_DBG(CAM_HFI,
+ "qtbl: version = %x size = %u num q = %u qhdr_size = %u",
+ qtbl_hdr->qtbl_version, qtbl_hdr->qtbl_size,
+ qtbl_hdr->qtbl_num_q, qtbl_hdr->qtbl_qhdr_size);
+
+ cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
+ CAM_DBG(CAM_HFI, "cmd: size = %u r_idx = %u w_idx = %u addr = %x",
+ cmd_q_hdr->qhdr_q_size, cmd_q_hdr->qhdr_read_idx,
+ cmd_q_hdr->qhdr_write_idx, hfi_mem->cmd_q.iova);
+ read_q = (uint32_t *)g_hfi->map.cmd_q.kva;
+ read_ptr = (uint32_t *)(read_q + 0);
+ CAM_DBG(CAM_HFI, "CMD Q START");
+ for (i = 0; i < ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; i++)
+ CAM_DBG(CAM_HFI, "Word: %d Data: 0x%08x ", i, read_ptr[i]);
+
+ msg_q_hdr = &qtbl->q_hdr[Q_MSG];
+ CAM_DBG(CAM_HFI, "msg: size = %u r_idx = %u w_idx = %u addr = %x",
+ msg_q_hdr->qhdr_q_size, msg_q_hdr->qhdr_read_idx,
+ msg_q_hdr->qhdr_write_idx, hfi_mem->msg_q.iova);
+ read_q = (uint32_t *)g_hfi->map.msg_q.kva;
+ read_ptr = (uint32_t *)(read_q + 0);
+ CAM_DBG(CAM_HFI, "MSG Q START");
+ for (i = 0; i < ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; i++)
+ CAM_DBG(CAM_HFI, "Word: %d Data: 0x%08x ", i, read_ptr[i]);
+}
+
+int hfi_write_cmd(void *cmd_ptr)
+{
+ uint32_t size_in_words, empty_space, new_write_idx, read_idx, temp;
+ uint32_t *write_q, *write_ptr;
+ struct hfi_qtbl *q_tbl;
+ struct hfi_q_hdr *q;
+ int rc = 0;
+
+ if (!cmd_ptr) {
+ CAM_ERR(CAM_HFI, "command is null");
+ return -EINVAL;
+ }
+
+ mutex_lock(&hfi_cmd_q_mutex);
+ if (!g_hfi) {
+ CAM_ERR(CAM_HFI, "HFI interface not setup");
+ rc = -ENODEV;
+ goto err;
+ }
+
+ if (g_hfi->hfi_state != HFI_READY ||
+ !g_hfi->cmd_q_state) {
+ CAM_ERR(CAM_HFI, "HFI state: %u, cmd q state: %u",
+ g_hfi->hfi_state, g_hfi->cmd_q_state);
+ rc = -ENODEV;
+ goto err;
+ }
+
+ q_tbl = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
+ q = &q_tbl->q_hdr[Q_CMD];
+
+ write_q = (uint32_t *)g_hfi->map.cmd_q.kva;
+
+ size_in_words = (*(uint32_t *)cmd_ptr) >> BYTE_WORD_SHIFT;
+ if (!size_in_words) {
+ CAM_DBG(CAM_HFI, "failed");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ read_idx = q->qhdr_read_idx;
+ empty_space = (q->qhdr_write_idx >= read_idx) ?
+ (q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) :
+ (read_idx - q->qhdr_write_idx);
+ if (empty_space <= size_in_words) {
+ CAM_ERR(CAM_HFI, "failed: empty space %u, size_in_words %u",
+ empty_space, size_in_words);
+ rc = -EIO;
+ goto err;
+ }
+
+ new_write_idx = q->qhdr_write_idx + size_in_words;
+ write_ptr = (uint32_t *)(write_q + q->qhdr_write_idx);
+
+ if (new_write_idx < q->qhdr_q_size) {
+ memcpy(write_ptr, (uint8_t *)cmd_ptr,
+ size_in_words << BYTE_WORD_SHIFT);
+ } else {
+ new_write_idx -= q->qhdr_q_size;
+ temp = (size_in_words - new_write_idx) << BYTE_WORD_SHIFT;
+ memcpy(write_ptr, (uint8_t *)cmd_ptr, temp);
+ memcpy(write_q, (uint8_t *)cmd_ptr + temp,
+ new_write_idx << BYTE_WORD_SHIFT);
+ }
+
+ /*
+ * To make sure command data in a command queue before
+ * updating write index
+ */
+ wmb();
+
+ q->qhdr_write_idx = new_write_idx;
+
+ /*
+ * Before raising interrupt make sure command data is ready for
+ * firmware to process
+ */
+ wmb();
+ cam_io_w_mb((uint32_t)INTR_ENABLE,
+ g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
+err:
+ mutex_unlock(&hfi_cmd_q_mutex);
+ return rc;
+}
+
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id,
+ uint32_t *words_read)
+{
+ struct hfi_qtbl *q_tbl_ptr;
+ struct hfi_q_hdr *q;
+ uint32_t new_read_idx, size_in_words, word_diff, temp;
+ uint32_t *read_q, *read_ptr, *write_ptr;
+ uint32_t size_upper_bound = 0;
+ int rc = 0;
+
+ if (!pmsg) {
+ CAM_ERR(CAM_HFI, "Invalid msg");
+ return -EINVAL;
+ }
+
+ if (q_id > Q_DBG) {
+ CAM_ERR(CAM_HFI, "Inavlid q :%u", q_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hfi_msg_q_mutex);
+ if (!g_hfi) {
+ CAM_ERR(CAM_HFI, "hfi not set up yet");
+ rc = -ENODEV;
+ goto err;
+ }
+
+ if ((g_hfi->hfi_state != HFI_READY) ||
+ !g_hfi->msg_q_state) {
+ CAM_ERR(CAM_HFI, "hfi state: %u, msg q state: %u",
+ g_hfi->hfi_state, g_hfi->msg_q_state);
+ rc = -ENODEV;
+ goto err;
+ }
+
+ q_tbl_ptr = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
+ q = &q_tbl_ptr->q_hdr[q_id];
+
+ if (q->qhdr_read_idx == q->qhdr_write_idx) {
+ CAM_DBG(CAM_HFI, "Q not ready, state:%u, r idx:%u, w idx:%u",
+ g_hfi->hfi_state, q->qhdr_read_idx, q->qhdr_write_idx);
+ rc = -EIO;
+ goto err;
+ }
+
+ if (q_id == Q_MSG) {
+ read_q = (uint32_t *)g_hfi->map.msg_q.kva;
+ size_upper_bound = ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS;
+ } else {
+ read_q = (uint32_t *)g_hfi->map.dbg_q.kva;
+ size_upper_bound = ICP_HFI_MAX_PKT_SIZE_IN_WORDS;
+ }
+
+ read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
+ write_ptr = (uint32_t *)(read_q + q->qhdr_write_idx);
+
+ if (write_ptr > read_ptr)
+ size_in_words = write_ptr - read_ptr;
+ else {
+ word_diff = read_ptr - write_ptr;
+ if (q_id == Q_MSG)
+ size_in_words = (ICP_MSG_Q_SIZE_IN_BYTES >>
+ BYTE_WORD_SHIFT) - word_diff;
+ else
+ size_in_words = (ICP_DBG_Q_SIZE_IN_BYTES >>
+ BYTE_WORD_SHIFT) - word_diff;
+ }
+
+ if ((size_in_words == 0) ||
+ (size_in_words > size_upper_bound)) {
+ CAM_ERR(CAM_HFI, "invalid HFI message packet size - 0x%08x",
+ size_in_words << BYTE_WORD_SHIFT);
+ q->qhdr_read_idx = q->qhdr_write_idx;
+ rc = -EIO;
+ goto err;
+ }
+
+ new_read_idx = q->qhdr_read_idx + size_in_words;
+
+ if (new_read_idx < q->qhdr_q_size) {
+ memcpy(pmsg, read_ptr, size_in_words << BYTE_WORD_SHIFT);
+ } else {
+ new_read_idx -= q->qhdr_q_size;
+ temp = (size_in_words - new_read_idx) << BYTE_WORD_SHIFT;
+ memcpy(pmsg, read_ptr, temp);
+ memcpy((uint8_t *)pmsg + temp, read_q,
+ new_read_idx << BYTE_WORD_SHIFT);
+ }
+
+ q->qhdr_read_idx = new_read_idx;
+ *words_read = size_in_words;
+ /* Memory Barrier to make sure message
+ * queue parameters are updated after read
+ */
+ wmb();
+err:
+ mutex_unlock(&hfi_msg_q_mutex);
+ return rc;
+}
+
+int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg)
+{
+ uint8_t *prop;
+ struct hfi_cmd_prop *dbg_prop;
+ uint32_t size = 0;
+
+ size = sizeof(struct hfi_cmd_prop) +
+ sizeof(struct hfi_cmd_ubwc_cfg);
+
+ prop = kzalloc(size, GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+
+ dbg_prop = (struct hfi_cmd_prop *)prop;
+ dbg_prop->size = size;
+ dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ dbg_prop->num_prop = 1;
+ dbg_prop->prop_data[0] = HFI_PROP_SYS_UBWC_CFG;
+ dbg_prop->prop_data[1] = ubwc_cfg[0];
+ dbg_prop->prop_data[2] = ubwc_cfg[1];
+
+ hfi_write_cmd(prop);
+ kfree(prop);
+
+ return 0;
+}
+
+/**
+ * hfi_enable_ipe_bps_pc() - toggle IPE/BPS power-collapse support in FW.
+ * @enable: whether power collapse handling should be enabled.
+ * @core_info: core bitmask forwarded to the firmware in prop_data[2].
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, or the error
+ * returned by hfi_write_cmd() (previously this error was ignored).
+ */
+int hfi_enable_ipe_bps_pc(bool enable, uint32_t core_info)
+{
+	uint8_t *prop;
+	struct hfi_cmd_prop *dbg_prop;
+	uint32_t size = 0;
+	int rc;
+
+	size = sizeof(struct hfi_cmd_prop) +
+		sizeof(struct hfi_ipe_bps_pc);
+
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	dbg_prop = (struct hfi_cmd_prop *)prop;
+	dbg_prop->size = size;
+	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	dbg_prop->num_prop = 1;
+	dbg_prop->prop_data[0] = HFI_PROP_SYS_IPEBPS_PC;
+	dbg_prop->prop_data[1] = enable;
+	dbg_prop->prop_data[2] = core_info;
+
+	/* Propagate queueing failures instead of silently dropping them. */
+	rc = hfi_write_cmd(prop);
+	kfree(prop);
+
+	return rc;
+}
+
+/**
+ * hfi_set_debug_level() - configure the firmware debug message level.
+ * @a5_dbg_type: debug sink/type, forwarded to FW in prop_data[2].
+ * @lvl: bitmask built from HFI_DEBUG_MSG_* / HFI_DEBUG_CFG_* flags.
+ *
+ * Note: the range check is the original numeric comparison against the
+ * OR of all supported flags; it is equivalent to a bitmask check only
+ * if the flags occupy a contiguous low bit range — TODO confirm.
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range @lvl, -ENOMEM on
+ * allocation failure, or the error returned by hfi_write_cmd()
+ * (previously this error was ignored).
+ */
+int hfi_set_debug_level(u64 a5_dbg_type, uint32_t lvl)
+{
+	uint8_t *prop;
+	struct hfi_cmd_prop *dbg_prop;
+	uint32_t size = 0, val;
+	int rc;
+
+	val = HFI_DEBUG_MSG_LOW |
+		HFI_DEBUG_MSG_MEDIUM |
+		HFI_DEBUG_MSG_HIGH |
+		HFI_DEBUG_MSG_ERROR |
+		HFI_DEBUG_MSG_FATAL |
+		HFI_DEBUG_MSG_PERF |
+		HFI_DEBUG_CFG_WFI |
+		HFI_DEBUG_CFG_ARM9WD;
+
+	if (lvl > val)
+		return -EINVAL;
+
+	size = sizeof(struct hfi_cmd_prop) +
+		sizeof(struct hfi_debug);
+
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	dbg_prop = (struct hfi_cmd_prop *)prop;
+	dbg_prop->size = size;
+	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	dbg_prop->num_prop = 1;
+	dbg_prop->prop_data[0] = HFI_PROP_SYS_DEBUG_CFG;
+	dbg_prop->prop_data[1] = lvl;
+	dbg_prop->prop_data[2] = a5_dbg_type;
+
+	/* Propagate queueing failures instead of silently dropping them. */
+	rc = hfi_write_cmd(prop);
+	kfree(prop);
+
+	return rc;
+}
+
+/**
+ * hfi_set_fw_dump_level() - set the firmware-dump verbosity level.
+ * @lvl: dump level forwarded in prop_data[1] of a
+ *       HFI_PROP_SYS_FW_DUMP_CFG property packet.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, or the error
+ * returned by hfi_write_cmd() (previously this error was ignored).
+ */
+int hfi_set_fw_dump_level(uint32_t lvl)
+{
+	uint8_t *prop = NULL;
+	struct hfi_cmd_prop *fw_dump_level_switch_prop = NULL;
+	uint32_t size = 0;
+	int rc;
+
+	CAM_DBG(CAM_HFI, "fw dump ENTER");
+
+	size = sizeof(struct hfi_cmd_prop) + sizeof(lvl);
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	fw_dump_level_switch_prop = (struct hfi_cmd_prop *)prop;
+	fw_dump_level_switch_prop->size = size;
+	fw_dump_level_switch_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	fw_dump_level_switch_prop->num_prop = 1;
+	fw_dump_level_switch_prop->prop_data[0] = HFI_PROP_SYS_FW_DUMP_CFG;
+	fw_dump_level_switch_prop->prop_data[1] = lvl;
+
+	CAM_DBG(CAM_HFI, "prop->size = %d\n"
+		"prop->pkt_type = %d\n"
+		"prop->num_prop = %d\n"
+		"prop->prop_data[0] = %d\n"
+		"prop->prop_data[1] = %d\n",
+		fw_dump_level_switch_prop->size,
+		fw_dump_level_switch_prop->pkt_type,
+		fw_dump_level_switch_prop->num_prop,
+		fw_dump_level_switch_prop->prop_data[0],
+		fw_dump_level_switch_prop->prop_data[1]);
+
+	/* Propagate queueing failures instead of silently dropping them. */
+	rc = hfi_write_cmd(prop);
+	kfree(prop);
+	return rc;
+}
+
+/**
+ * hfi_send_system_cmd() - build and queue a system-level HFI command.
+ * @type: HFI_CMD_SYS_* / HFI_CMD_IPEBPS_* packet type.
+ * @data: command-specific payload (user data, handle type, property id).
+ * @size: currently unused; kept for interface compatibility.
+ *
+ * Fix: every stack-built packet is now zero-initialized before its
+ * fields are set. Previously only HFI_CMD_SYS_INIT was memset, so the
+ * other commands handed uninitialized stack bytes to the firmware.
+ */
+void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size)
+{
+	switch (type) {
+	case HFI_CMD_SYS_INIT: {
+		struct hfi_cmd_sys_init init;
+
+		memset(&init, 0, sizeof(init));
+
+		init.size = sizeof(struct hfi_cmd_sys_init);
+		init.pkt_type = type;
+		hfi_write_cmd(&init);
+	}
+	break;
+	case HFI_CMD_SYS_PC_PREP: {
+		struct hfi_cmd_pc_prep prep;
+
+		memset(&prep, 0, sizeof(prep));
+		prep.size = sizeof(struct hfi_cmd_pc_prep);
+		prep.pkt_type = type;
+		hfi_write_cmd(&prep);
+	}
+	break;
+	case HFI_CMD_SYS_SET_PROPERTY: {
+		struct hfi_cmd_prop prop;
+
+		memset(&prop, 0, sizeof(prop));
+		if ((uint32_t)data == (uint32_t)HFI_PROP_SYS_DEBUG_CFG) {
+			prop.size = sizeof(struct hfi_cmd_prop);
+			prop.pkt_type = type;
+			prop.num_prop = 1;
+			prop.prop_data[0] = HFI_PROP_SYS_DEBUG_CFG;
+			hfi_write_cmd(&prop);
+		}
+	}
+	break;
+	case HFI_CMD_SYS_GET_PROPERTY:
+		break;
+	case HFI_CMD_SYS_PING: {
+		struct hfi_cmd_ping_pkt ping;
+
+		memset(&ping, 0, sizeof(ping));
+		ping.size = sizeof(struct hfi_cmd_ping_pkt);
+		ping.pkt_type = type;
+		ping.user_data = (uint64_t)data;
+		hfi_write_cmd(&ping);
+	}
+	break;
+	case HFI_CMD_SYS_RESET: {
+		struct hfi_cmd_sys_reset_pkt reset;
+
+		memset(&reset, 0, sizeof(reset));
+		reset.size = sizeof(struct hfi_cmd_sys_reset_pkt);
+		reset.pkt_type = type;
+		reset.user_data = (uint64_t)data;
+		hfi_write_cmd(&reset);
+	}
+	break;
+	case HFI_CMD_IPEBPS_CREATE_HANDLE: {
+		struct hfi_cmd_create_handle handle;
+
+		memset(&handle, 0, sizeof(handle));
+		handle.size = sizeof(struct hfi_cmd_create_handle);
+		handle.pkt_type = type;
+		handle.handle_type = (uint32_t)data;
+		handle.user_data1 = 0;
+		hfi_write_cmd(&handle);
+	}
+	break;
+	case HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT:
+		break;
+	default:
+		CAM_ERR(CAM_HFI, "command not supported :%d", type);
+		break;
+	}
+}
+
+
+/*
+ * Fill a cam_icp_query_cap_cmd with static (stubbed) capability values:
+ * fixed firmware/API versions, 2 IPE cores, 1 BPS core, and a synthetic
+ * hardware version per device type.
+ */
+int hfi_get_hw_caps(void *query_buf)
+{
+	struct cam_icp_query_cap_cmd *caps;
+	int dev;
+
+	if (!query_buf) {
+		CAM_ERR(CAM_HFI, "query buf is NULL");
+		return -EINVAL;
+	}
+
+	caps = (struct cam_icp_query_cap_cmd *)query_buf;
+
+	caps->fw_version.major = 0x12;
+	caps->fw_version.minor = 0x12;
+	caps->fw_version.revision = 0x12;
+
+	caps->api_version.major = 0x13;
+	caps->api_version.minor = 0x13;
+	caps->api_version.revision = 0x13;
+
+	caps->num_ipe = 2;
+	caps->num_bps = 1;
+
+	for (dev = 0; dev < CAM_ICP_DEV_TYPE_MAX; dev++) {
+		caps->dev_ver[dev].dev_type = dev;
+		caps->dev_ver[dev].hw_ver.major = 0x34 + dev;
+		caps->dev_ver[dev].hw_ver.minor = 0x34 + dev;
+		caps->dev_ver[dev].hw_ver.incr = 0x34 + dev;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_hfi_disable_cpu() - halt the A5 CPU and mask its interrupts.
+ * @icp_base: mapped base of the ICP CSR register space.
+ *
+ * Waits up to HFI_MAX_POLL_TRY * 100 ms for the firmware to reach WFI,
+ * then clears the A5 enable/wake-up bits, issues a reset request and
+ * disables the A5-to-host interrupt line.
+ */
+void cam_hfi_disable_cpu(void __iomem *icp_base)
+{
+	uint32_t data;
+	uint32_t val;
+	uint32_t try = 0;
+
+	while (try < HFI_MAX_POLL_TRY) {
+		data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+		CAM_DBG(CAM_HFI, "wfi status = %x\n", (int)data);
+
+		if (data & ICP_CSR_A5_STATUS_WFI)
+			break;
+		/* Need to poll here to confirm that FW is going trigger wfi
+		 * and Host can the proceed. No interrupt is expected from FW
+		 * at this time.
+		 */
+		msleep(100);
+		try++;
+	}
+
+	/* Drop the A5 run and wake-up enable bits. */
+	val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
+	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+
+	/* NOTE(review): the value is read and written back unmodified —
+	 * presumably a write-to-trigger reset register; confirm against
+	 * the hardware manual.
+	 */
+	val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+
+	/* Tell the firmware a reset is requested and mask its interrupt. */
+	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
+		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
+	cam_io_w_mb((uint32_t)INTR_DISABLE,
+		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+}
+
+/*
+ * cam_hfi_enable_cpu() - set the A5 run bit and release it from reset.
+ * @icp_base: mapped base of the ICP CSR register space.
+ *
+ * NOTE(review): 0x10 is written raw to the NSEC_RESET register — it has
+ * no named constant here; confirm its meaning against the register map.
+ */
+void cam_hfi_enable_cpu(void __iomem *icp_base)
+{
+	cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN,
+		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	cam_io_w_mb((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+}
+
+/*
+ * cam_hfi_resume() - bring the A5 back up after a power collapse.
+ * @hfi_mem: iova map of queue table, SFR buffer, shared memory, secure
+ *           heap and QDSS regions, re-programmed into the ICP registers.
+ * @icp_base: mapped base of the ICP CSR register space.
+ * @debug: when true, enable the A5 debug control bit before the run bit.
+ *
+ * Polls HFI_REG_ICP_HOST_INIT_RESPONSE until the firmware reports
+ * ICP_INIT_RESP_SUCCESS, then re-enables interrupts and restores the
+ * shared-memory register set.
+ *
+ * NOTE(review): g_hfi is dereferenced without a NULL check — callers
+ * presumably guarantee cam_hfi_init() succeeded first; confirm.
+ *
+ * Return: 0 on success, -EINVAL on firmware init failure or when the
+ * retry budget is exhausted with a RESET status.
+ */
+int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug)
+{
+	int rc = 0;
+	uint32_t data;
+	uint32_t fw_version, status = 0;
+	uint32_t retry_cnt = 0;
+
+	cam_hfi_enable_cpu(icp_base);
+	g_hfi->csr_base = icp_base;
+
+	if (debug) {
+		cam_io_w_mb(ICP_FLAG_A5_CTRL_DBG_EN,
+			(icp_base + HFI_REG_A5_CSR_A5_CONTROL));
+
+		/* Barrier needed as next write should be done after
+		 * sucessful previous write. Next write enable clock
+		 * gating
+		 */
+		wmb();
+
+		cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+
+	} else {
+		cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	}
+
+	/* Wait for the firmware's init handshake; each attempt polls the
+	 * response register for up to 10 ms before re-reading it.
+	 */
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+			status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);
+
+		CAM_DBG(CAM_HFI, "1: status = %u", status);
+		status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
+		CAM_DBG(CAM_HFI, "2: status = %u", status);
+		if (status == ICP_INIT_RESP_SUCCESS)
+			break;
+
+		if (status == ICP_INIT_RESP_FAILED) {
+			CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
+				status);
+			fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+			CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+			return -EINVAL;
+		}
+		retry_cnt++;
+	}
+
+	if ((retry_cnt == HFI_MAX_POLL_TRY) &&
+		(status == ICP_INIT_RESP_RESET)) {
+		CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
+			status);
+		fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+		CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+		return -EINVAL;
+	}
+
+	/* Handshake done: unmask FW and watchdog interrupts. */
+	cam_io_w_mb((uint32_t)(INTR_ENABLE|INTR_ENABLE_WD0),
+		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+
+	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+	CAM_DBG(CAM_HFI, "fw version : [%x]", fw_version);
+
+	data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+	CAM_DBG(CAM_HFI, "wfi status = %x", (int)data);
+
+	/* Restore the shared-memory layout registers for the firmware. */
+	cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova,
+		icp_base + HFI_REG_SFR_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
+		icp_base + HFI_REG_SHARED_MEM_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
+		icp_base + HFI_REG_SHARED_MEM_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
+		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
+		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.iova,
+		icp_base + HFI_REG_QDSS_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
+		icp_base + HFI_REG_QDSS_IOVA_SIZE);
+
+	return rc;
+}
+
+/**
+ * cam_hfi_init() - set up the host<->ICP HFI interface and boot handshake.
+ * @event_driven_mode: INTR_MODE, POLL_MODE or WM_MODE queue signalling.
+ * @hfi_mem: iova/kva map of the queue table, queues, SFR and heaps.
+ * @icp_base: mapped base of the ICP CSR register space.
+ * @debug: when true, bring the A5 up with debug controls enabled.
+ *
+ * Allocates the global HFI state on first use, initializes the queue
+ * table and the command/message/debug queue headers, programs the
+ * shared-memory registers, then polls HFI_REG_ICP_HOST_INIT_RESPONSE
+ * for the firmware's init acknowledgement.
+ *
+ * Return: 0 on success, -ENOMEM or -EINVAL on failure.
+ */
+int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
+		void __iomem *icp_base, bool debug)
+{
+	int rc = 0;
+	struct hfi_qtbl *qtbl;
+	struct hfi_qtbl_hdr *qtbl_hdr;
+	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
+	uint32_t hw_version, soc_version, fw_version, status = 0;
+	uint32_t retry_cnt = 0;
+	struct sfr_buf *sfr_buffer;
+
+	mutex_lock(&hfi_cmd_q_mutex);
+	mutex_lock(&hfi_msg_q_mutex);
+
+	if (!g_hfi) {
+		g_hfi = kzalloc(sizeof(struct hfi_info), GFP_KERNEL);
+		if (!g_hfi) {
+			rc = -ENOMEM;
+			goto alloc_fail;
+		}
+	}
+
+	if (g_hfi->hfi_state != HFI_DEINIT) {
+		CAM_ERR(CAM_HFI, "hfi_init: invalid state");
+		/* Fix: the original returned -EINVAL here while still
+		 * holding both queue mutexes, deadlocking every later HFI
+		 * call. Route through the unlock path instead (g_hfi is
+		 * intentionally kept, matching the original semantics).
+		 */
+		rc = -EINVAL;
+		goto alloc_fail;
+	}
+
+	memcpy(&g_hfi->map, hfi_mem, sizeof(g_hfi->map));
+	g_hfi->hfi_state = HFI_DEINIT;
+	soc_version = socinfo_get_version();
+	if (debug) {
+		cam_io_w_mb(
+			(uint32_t)(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN |
+			ICP_CSR_EDBGRQ | ICP_CSR_DBGSWENABLE),
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+		msleep(100);
+		cam_io_w_mb((uint32_t)(ICP_FLAG_CSR_A5_EN |
+			ICP_FLAG_CSR_WAKE_UP_EN | ICP_CSR_EN_CLKGATE_WFI),
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	} else {
+		/* Due to hardware bug in V1 ICP clock gating has to be
+		 * disabled, this is supposed to be fixed in V-2. But enabling
+		 * the clock gating is causing the firmware hang, hence
+		 * disabling the clock gating on both V1 and V2 until the
+		 * hardware team root causes this
+		 */
+		cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN |
+			ICP_FLAG_CSR_WAKE_UP_EN |
+			ICP_CSR_EN_CLKGATE_WFI,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	}
+
+	/* Queue table header: version, size, and per-queue header layout. */
+	qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
+	qtbl_hdr = &qtbl->q_tbl_hdr;
+	qtbl_hdr->qtbl_version = 0xFFFFFFFF;
+	qtbl_hdr->qtbl_size = sizeof(struct hfi_qtbl);
+	qtbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_qtbl_hdr);
+	qtbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_q_hdr);
+	qtbl_hdr->qtbl_num_q = ICP_HFI_NUMBER_OF_QS;
+	qtbl_hdr->qtbl_num_active_q = ICP_HFI_NUMBER_OF_QS;
+
+	/* setup host-to-firmware command queue */
+	cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
+	cmd_q_hdr->qhdr_status = QHDR_ACTIVE;
+	cmd_q_hdr->qhdr_start_addr = hfi_mem->cmd_q.iova;
+	cmd_q_hdr->qhdr_q_size = ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	cmd_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	cmd_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	cmd_q_hdr->qhdr_read_idx = RESET;
+	cmd_q_hdr->qhdr_write_idx = RESET;
+
+	/* setup firmware-to-Host message queue */
+	msg_q_hdr = &qtbl->q_hdr[Q_MSG];
+	msg_q_hdr->qhdr_status = QHDR_ACTIVE;
+	msg_q_hdr->qhdr_start_addr = hfi_mem->msg_q.iova;
+	msg_q_hdr->qhdr_q_size = ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	msg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	msg_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	msg_q_hdr->qhdr_read_idx = RESET;
+	msg_q_hdr->qhdr_write_idx = RESET;
+
+	/* setup firmware-to-Host debug queue */
+	dbg_q_hdr = &qtbl->q_hdr[Q_DBG];
+	dbg_q_hdr->qhdr_status = QHDR_ACTIVE;
+	dbg_q_hdr->qhdr_start_addr = hfi_mem->dbg_q.iova;
+	dbg_q_hdr->qhdr_q_size = ICP_DBG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	dbg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	dbg_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	dbg_q_hdr->qhdr_read_idx = RESET;
+	dbg_q_hdr->qhdr_write_idx = RESET;
+
+	sfr_buffer = (struct sfr_buf *)hfi_mem->sfr_buf.kva;
+	sfr_buffer->size = ICP_MSG_SFR_SIZE_IN_BYTES;
+
+	/* Program per-queue signalling per the requested event mode. */
+	switch (event_driven_mode) {
+	case INTR_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD;
+		cmd_q_hdr->qhdr_rx_wm = SET;
+		cmd_q_hdr->qhdr_tx_wm = SET;
+		cmd_q_hdr->qhdr_rx_req = SET;
+		cmd_q_hdr->qhdr_tx_req = RESET;
+		cmd_q_hdr->qhdr_rx_irq_status = RESET;
+		cmd_q_hdr->qhdr_tx_irq_status = RESET;
+
+		msg_q_hdr->qhdr_type = Q_MSG;
+		msg_q_hdr->qhdr_rx_wm = SET;
+		msg_q_hdr->qhdr_tx_wm = SET;
+		msg_q_hdr->qhdr_rx_req = SET;
+		msg_q_hdr->qhdr_tx_req = RESET;
+		msg_q_hdr->qhdr_rx_irq_status = RESET;
+		msg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		dbg_q_hdr->qhdr_type = Q_DBG;
+		dbg_q_hdr->qhdr_rx_wm = SET;
+		dbg_q_hdr->qhdr_tx_wm = SET_WM;
+		dbg_q_hdr->qhdr_rx_req = RESET;
+		dbg_q_hdr->qhdr_tx_req = RESET;
+		dbg_q_hdr->qhdr_rx_irq_status = RESET;
+		dbg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		break;
+
+	case POLL_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		break;
+
+	case WM_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		cmd_q_hdr->qhdr_rx_wm = SET;
+		cmd_q_hdr->qhdr_tx_wm = SET;
+		cmd_q_hdr->qhdr_rx_req = RESET;
+		cmd_q_hdr->qhdr_tx_req = SET;
+		cmd_q_hdr->qhdr_rx_irq_status = RESET;
+		cmd_q_hdr->qhdr_tx_irq_status = RESET;
+
+		msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		msg_q_hdr->qhdr_rx_wm = SET;
+		msg_q_hdr->qhdr_tx_wm = SET;
+		msg_q_hdr->qhdr_rx_req = SET;
+		msg_q_hdr->qhdr_tx_req = RESET;
+		msg_q_hdr->qhdr_rx_irq_status = RESET;
+		msg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		dbg_q_hdr->qhdr_rx_wm = SET;
+		dbg_q_hdr->qhdr_tx_wm = SET_WM;
+		dbg_q_hdr->qhdr_rx_req = RESET;
+		dbg_q_hdr->qhdr_tx_req = RESET;
+		dbg_q_hdr->qhdr_rx_irq_status = RESET;
+		dbg_q_hdr->qhdr_tx_irq_status = RESET;
+		break;
+
+	default:
+		CAM_ERR(CAM_HFI, "Invalid event driven mode :%u",
+			event_driven_mode);
+		break;
+	}
+
+	/* Publish the memory layout to the firmware and request init. */
+	cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova,
+		icp_base + HFI_REG_QTBL_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova,
+		icp_base + HFI_REG_SFR_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
+		icp_base + HFI_REG_SHARED_MEM_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
+		icp_base + HFI_REG_SHARED_MEM_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
+		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
+		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
+	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_SET,
+		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.iova,
+		icp_base + HFI_REG_QDSS_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
+		icp_base + HFI_REG_QDSS_IOVA_SIZE);
+
+	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
+
+	/* Wait for the firmware's init handshake. */
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+			status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);
+
+		CAM_DBG(CAM_HFI, "1: status = %u rc = %d", status, rc);
+		status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
+		CAM_DBG(CAM_HFI, "2: status = %u rc = %d", status, rc);
+		if (status == ICP_INIT_RESP_SUCCESS)
+			break;
+
+		if (status == ICP_INIT_RESP_FAILED) {
+			CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
+				status);
+			fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+			CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+			goto regions_fail;
+		}
+		retry_cnt++;
+	}
+
+	if ((retry_cnt == HFI_MAX_POLL_TRY) &&
+		(status == ICP_INIT_RESP_RESET)) {
+		CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
+			status);
+		fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+		CAM_ERR(CAM_HFI,
+			"hw version : : [%x], fw version : [%x]",
+			hw_version, fw_version);
+		goto regions_fail;
+	}
+
+	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+	CAM_DBG(CAM_HFI, "hw version : : [%x], fw version : [%x]",
+		hw_version, fw_version);
+
+	g_hfi->csr_base = icp_base;
+	g_hfi->hfi_state = HFI_READY;
+	g_hfi->cmd_q_state = true;
+	g_hfi->msg_q_state = true;
+	cam_io_w_mb((uint32_t)(INTR_ENABLE|INTR_ENABLE_WD0),
+		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
+
+	return rc;
+regions_fail:
+	kfree(g_hfi);
+	g_hfi = NULL;
+alloc_fail:
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
+	return rc;
+}
+
+/*
+ * cam_hfi_deinit() - tear down the global HFI state.
+ * @icp_base: mapped ICP CSR base (unused here; kept for symmetry with
+ *            cam_hfi_init()).
+ *
+ * Marks both queues inactive and frees (and zeroizes, via kzfree) the
+ * global hfi_info under both queue mutexes. Safe to call when no HFI
+ * path was ever established.
+ */
+void cam_hfi_deinit(void __iomem *icp_base)
+{
+	mutex_lock(&hfi_cmd_q_mutex);
+	mutex_lock(&hfi_msg_q_mutex);
+
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "hfi path not established yet");
+		goto err;
+	}
+
+	g_hfi->cmd_q_state = false;
+	g_hfi->msg_q_state = false;
+
+	kzfree(g_hfi);
+	g_hfi = NULL;
+
+err:
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/Makefile
new file mode 100644
index 000000000000..db4ae6c1376a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw_mgr/ a5_hw/ ipe_hw/ bps_hw/
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/Makefile
new file mode 100644
index 000000000000..055d9cd58db4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_core.c
new file mode 100644
index 000000000000..18bd6d8dd2c7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -0,0 +1,483 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/elf.h>
+#include <media/cam_icp.h>
+#include "cam_io_util.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "a5_core.h"
+#include "a5_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "hfi_intf.h"
+#include "hfi_sys_defs.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+/*
+ * cam_a5_cpas_vote() - apply pending AHB and/or AXI bandwidth votes.
+ * @core_info: A5 core state holding the CPAS client handle.
+ * @cpas_vote: votes to apply; only the ones flagged *_valid are sent.
+ *
+ * Fix: previously a successful AXI update overwrote an earlier AHB
+ * failure in @rc; the first failure is now preserved and reported.
+ *
+ * Return: 0 on success, or the first CPAS error encountered.
+ */
+static int cam_a5_cpas_vote(struct cam_a5_device_core_info *core_info,
+	struct cam_icp_cpas_vote *cpas_vote)
+{
+	int rc = 0;
+	int axi_rc = 0;
+
+	if (cpas_vote->ahb_vote_valid)
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+			&cpas_vote->ahb_vote);
+
+	if (cpas_vote->axi_vote_valid)
+		axi_rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+			&cpas_vote->axi_vote);
+
+	if (!rc)
+		rc = axi_rc;
+
+	if (rc)
+		CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
+
+	return rc;
+}
+
+/*
+ * Sanity-check a candidate ICP firmware image: it must carry the ELF
+ * magic and be a 32-bit ARM binary.
+ */
+static int32_t cam_icp_validate_fw(const uint8_t *elf)
+{
+	const struct elf32_hdr *hdr;
+
+	if (!elf) {
+		CAM_ERR(CAM_ICP, "Invalid params");
+		return -EINVAL;
+	}
+
+	hdr = (const struct elf32_hdr *)elf;
+
+	/* ELF magic bytes */
+	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0) {
+		CAM_ERR(CAM_ICP, "ICP elf identifier is failed");
+		return -EINVAL;
+	}
+
+	/* target architecture must be ARM */
+	if (hdr->e_machine != EM_ARM) {
+		CAM_ERR(CAM_ICP, "unsupported arch");
+		return -EINVAL;
+	}
+
+	/* only 32-bit ELF images are accepted */
+	if (hdr->e_ident[EI_CLASS] != ELFCLASS32) {
+		CAM_ERR(CAM_ICP, "elf doesn't support 32 bit format");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_icp_get_fw_size() - compute the memory footprint of an ELF image.
+ * @elf: validated firmware image.
+ * @fw_size: out; highest aligned end address over all loadable segments.
+ *
+ * Fix: an ELF segment may legally carry p_align == 0 (meaning "no
+ * alignment"); the original mask (p_memsz + p_align - 1) & ~(p_align - 1)
+ * then collapsed the size to 0. Treat p_align 0 as 1.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments or an empty image.
+ */
+static int32_t cam_icp_get_fw_size(const uint8_t *elf, uint32_t *fw_size)
+{
+	int32_t rc = 0;
+	int32_t i = 0;
+	uint32_t num_prg_hdrs;
+	unsigned char *icp_prg_hdr_tbl;
+	uint32_t seg_mem_size = 0;
+	struct elf32_hdr *elf_hdr;
+	struct elf32_phdr *prg_hdr;
+
+	if (!elf || !fw_size) {
+		CAM_ERR(CAM_ICP, "invalid args");
+		return -EINVAL;
+	}
+
+	*fw_size = 0;
+
+	elf_hdr = (struct elf32_hdr *)elf;
+	num_prg_hdrs = elf_hdr->e_phnum;
+	icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff;
+	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
+
+	if (!prg_hdr) {
+		CAM_ERR(CAM_ICP, "failed to get elf program header attr");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "num_prg_hdrs = %d", num_prg_hdrs);
+	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
+		uint32_t align;
+
+		/* skip segments with no access flags (not loadable) */
+		if (prg_hdr->p_flags == 0)
+			continue;
+
+		/* p_align of 0 or 1 means no alignment constraint; other
+		 * values are powers of two per the ELF spec (assumed, not
+		 * re-validated here).
+		 */
+		align = prg_hdr->p_align ? prg_hdr->p_align : 1;
+		seg_mem_size = (prg_hdr->p_memsz + align - 1) &
+			~(align - 1);
+		seg_mem_size += prg_hdr->p_vaddr;
+		CAM_DBG(CAM_ICP, "memsz:%x align:%x addr:%x seg_mem_size:%x",
+			(int)prg_hdr->p_memsz, (int)prg_hdr->p_align,
+			(int)prg_hdr->p_vaddr, (int)seg_mem_size);
+		if (*fw_size < seg_mem_size)
+			*fw_size = seg_mem_size;
+
+	}
+
+	if (*fw_size == 0) {
+		CAM_ERR(CAM_ICP, "invalid elf fw file");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_icp_program_fw() - copy loadable ELF segments into the fw buffer.
+ * @elf: validated firmware image.
+ * @core_info: holds the mapped destination buffer (fw_kva_addr) and its
+ *             length (fw_buf_len).
+ *
+ * Fix: segment p_vaddr/p_filesz come from the (untrusted) ELF file and
+ * were used unchecked as the memcpy_toio() destination offset and
+ * length; each segment is now bounded by fw_buf_len.
+ *
+ * Return: 0 on success, -EINVAL on a malformed image.
+ */
+static int32_t cam_icp_program_fw(const uint8_t *elf,
+	struct cam_a5_device_core_info *core_info)
+{
+	int32_t rc = 0;
+	uint32_t num_prg_hdrs;
+	unsigned char *icp_prg_hdr_tbl;
+	int32_t i = 0;
+	u8 *dest;
+	u8 *src;
+	struct elf32_hdr *elf_hdr;
+	struct elf32_phdr *prg_hdr;
+
+	elf_hdr = (struct elf32_hdr *)elf;
+	num_prg_hdrs = elf_hdr->e_phnum;
+	icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff;
+	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
+
+	if (!prg_hdr) {
+		CAM_ERR(CAM_ICP, "failed to get elf program header attr");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
+		if (prg_hdr->p_flags == 0)
+			continue;
+
+		CAM_DBG(CAM_ICP, "Loading FW header size: %u",
+			prg_hdr->p_filesz);
+		if (prg_hdr->p_filesz != 0) {
+			/* Bound the copy: both offset and length are
+			 * attacker-controllable file contents.
+			 */
+			if ((prg_hdr->p_vaddr > core_info->fw_buf_len) ||
+				(prg_hdr->p_filesz >
+				core_info->fw_buf_len - prg_hdr->p_vaddr)) {
+				CAM_ERR(CAM_ICP,
+					"Invalid fw segment vaddr:0x%x size:0x%x",
+					prg_hdr->p_vaddr, prg_hdr->p_filesz);
+				return -EINVAL;
+			}
+			src = (u8 *)((u8 *)elf + prg_hdr->p_offset);
+			dest = (u8 *)(((u8 *)core_info->fw_kva_addr) +
+				prg_hdr->p_vaddr);
+
+			memcpy_toio(dest, src, prg_hdr->p_filesz);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Fetch "CAMERA_ICP.elf" via request_firmware(), validate it, check it
+ * fits the preallocated firmware buffer and copy its loadable segments
+ * in. The firmware blob is released on every path once the copy (or a
+ * failure) has happened.
+ */
+static int32_t cam_a5_download_fw(void *device_priv)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_a5_device_core_info *core_info;
+	const uint8_t *fw_start;
+	uint32_t fw_size;
+	int32_t rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+
+	rc = request_firmware(&core_info->fw_elf, "CAMERA_ICP.elf",
+		&soc_info->pdev->dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc);
+		return rc;
+	}
+
+	if (!core_info->fw_elf) {
+		CAM_ERR(CAM_ICP, "Invalid elf size");
+		rc = -EINVAL;
+		goto out_release;
+	}
+
+	fw_start = core_info->fw_elf->data;
+
+	rc = cam_icp_validate_fw(fw_start);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "fw elf validation failed");
+		goto out_release;
+	}
+
+	rc = cam_icp_get_fw_size(fw_start, &fw_size);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to get fw size");
+		goto out_release;
+	}
+
+	if (core_info->fw_buf_len < fw_size) {
+		CAM_ERR(CAM_ICP, "mismatch in fw size: %u %llu",
+			fw_size, core_info->fw_buf_len);
+		rc = -EINVAL;
+		goto out_release;
+	}
+
+	rc = cam_icp_program_fw(fw_start, core_info);
+	if (rc)
+		CAM_ERR(CAM_ICP, "fw program is failed");
+
+out_release:
+	/* segments (if any) are already copied; the blob can go */
+	release_firmware(core_info->fw_elf);
+	return rc;
+}
+
+/*
+ * cam_a5_init_hw() - power up the A5: start CPAS with default votes,
+ * then enable SoC resources (clocks/regulators).
+ * @device_priv: struct cam_hw_info for the A5 device.
+ * @init_hw_args: unused.
+ * @arg_size: unused.
+ *
+ * On SoC-enable failure, CPAS is stopped again so cpas_start tracking
+ * stays consistent.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int cam_a5_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info: %pK core_info: %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	/* default bring-up votes: SVS AHB level, fixed AXI bandwidth */
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.compressed_bw = CAM_ICP_A5_BW_BYTES_VOTE;
+	cpas_vote.axi_vote.uncompressed_bw = CAM_ICP_A5_BW_BYTES_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_start = true;
+
+	rc = cam_a5_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc);
+		/* roll back CPAS so cpas_start reflects reality */
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_a5_deinit_hw() - power down the A5: disable SoC resources, then
+ * stop CPAS if this driver started it.
+ * @device_priv: struct cam_hw_info for the A5 device.
+ * @init_hw_args: unused.
+ * @arg_size: unused.
+ *
+ * Return: the SoC-disable result; CPAS stop failures are only logged.
+ */
+int cam_a5_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_a5_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
+
+	if (core_info->cpas_start) {
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_a5_irq() - A5 interrupt handler.
+ * @irq_num: interrupt line (unused).
+ * @data: struct cam_hw_info registered at request_irq time.
+ *
+ * Reads and acknowledges the A5 host interrupt status, rate-limit-logs
+ * watchdog bits, and forwards the raw status to the ICP HW manager
+ * callback (if registered) under the device hw_lock.
+ */
+irqreturn_t cam_a5_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *a5_dev = data;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	uint32_t irq_status = 0;
+
+	if (!data) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info or query_cap args");
+		return IRQ_HANDLED;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+
+	/* read pending cause, then ack it in the clear register */
+	irq_status = cam_io_r_mb(soc_info->reg_map[A5_SIERRA_BASE].mem_base +
+		core_info->a5_hw_info->a5_host_int_status);
+
+	cam_io_w_mb(irq_status,
+		soc_info->reg_map[A5_SIERRA_BASE].mem_base +
+		core_info->a5_hw_info->a5_host_int_clr);
+
+	if ((irq_status & A5_WDT_0) ||
+		(irq_status & A5_WDT_1)) {
+		CAM_ERR_RATE_LIMIT(CAM_ICP, "watch dog interrupt from A5");
+	}
+
+	/* hw_lock guards irq_cb against concurrent (un)registration */
+	spin_lock(&a5_dev->hw_lock);
+	if (core_info->irq_cb.icp_hw_mgr_cb)
+		core_info->irq_cb.icp_hw_mgr_cb(irq_status,
+			core_info->irq_cb.data);
+	spin_unlock(&a5_dev->hw_lock);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * cam_a5_process_cmd() - dispatch a CAM_ICP_A5_* control command.
+ * @device_priv: struct cam_hw_info for the A5 device.
+ * @cmd_type: one of enum CAM_ICP_A5_CMD_* (validated below).
+ * @cmd_args: command-specific argument struct (may be required).
+ * @arg_size: unused.
+ *
+ * Return: 0 or the command-specific result; -EINVAL for bad arguments.
+ */
+int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	struct a5_soc_info *a5_soc = NULL;
+	unsigned long flags;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_ICP_A5_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ICP_A5_CMD_FW_DOWNLOAD:
+		rc = cam_a5_download_fw(device_priv);
+		break;
+	case CAM_ICP_A5_CMD_SET_FW_BUF: {
+		/* record the SMMU-mapped firmware buffer for later loads */
+		struct cam_icp_a5_set_fw_buf_info *fw_buf_info = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		core_info->fw_buf = fw_buf_info->iova;
+		core_info->fw_kva_addr = fw_buf_info->kva;
+		core_info->fw_buf_len = fw_buf_info->len;
+
+		CAM_DBG(CAM_ICP, "fw buf info = %x %llx %lld",
+			core_info->fw_buf, core_info->fw_kva_addr,
+			core_info->fw_buf_len);
+		break;
+	}
+	case CAM_ICP_A5_SET_IRQ_CB: {
+		/* (un)register the HW-manager IRQ callback under hw_lock */
+		struct cam_icp_a5_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		spin_lock_irqsave(&a5_dev->hw_lock, flags);
+		core_info->irq_cb.icp_hw_mgr_cb = irq_cb->icp_hw_mgr_cb;
+		core_info->irq_cb.data = irq_cb->data;
+		spin_unlock_irqrestore(&a5_dev->hw_lock, flags);
+		break;
+	}
+
+	case CAM_ICP_A5_SEND_INIT:
+		hfi_send_system_cmd(HFI_CMD_SYS_INIT, 0, 0);
+		break;
+
+	case CAM_ICP_A5_CMD_PC_PREP:
+		hfi_send_system_cmd(HFI_CMD_SYS_PC_PREP, 0, 0);
+		break;
+
+	case CAM_ICP_A5_CMD_VOTE_CPAS: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		/* NOTE(review): the vote result is not propagated to rc */
+		cam_a5_cpas_vote(core_info, cpas_vote);
+		break;
+	}
+
+	case CAM_ICP_A5_CMD_CPAS_START: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		/* NOTE(review): cpas_start is set true even if
+		 * cam_cpas_start() failed — confirm intent.
+		 */
+		if (!core_info->cpas_start) {
+			rc = cam_cpas_start(core_info->cpas_handle,
+				&cpas_vote->ahb_vote,
+				&cpas_vote->axi_vote);
+			core_info->cpas_start = true;
+		}
+		break;
+	}
+
+	case CAM_ICP_A5_CMD_CPAS_STOP:
+		if (core_info->cpas_start) {
+			cam_cpas_stop(core_info->cpas_handle);
+			core_info->cpas_start = false;
+		}
+		break;
+	case CAM_ICP_A5_CMD_UBWC_CFG:
+		a5_soc = soc_info->soc_private;
+		if (!a5_soc) {
+			CAM_ERR(CAM_ICP, "A5 private soc info is NULL");
+			return -EINVAL;
+		}
+		rc = hfi_cmd_ubwc_config(a5_soc->ubwc_cfg);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_core.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_core.h
new file mode 100644
index 000000000000..9b02167b3c7f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_core.h
@@ -0,0 +1,89 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_A5_CORE_H
+#define CAM_A5_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include "cam_a5_hw_intf.h"
+
+#define A5_QGIC_BASE 0 /* reg_map index: QGIC block */
+#define A5_SIERRA_BASE 1 /* reg_map index: SIERRA block */
+#define A5_CSR_BASE 2 /* reg_map index: CSR block */
+
+#define A5_HOST_INT 0x1 /* A5-to-host interrupt status bits */
+#define A5_WDT_0 0x2
+#define A5_WDT_1 0x4
+
+#define ELF_GUARD_PAGE (2 * 1024 * 1024) /* 2 MiB guard region reserved beyond the fw image */
+
+struct cam_a5_device_hw_info { /* register offsets; populated from cam_a5_hw_info in a5_dev.c */
+ uint32_t hw_ver;
+ uint32_t nsec_reset;
+ uint32_t a5_control;
+ uint32_t a5_host_int_en;
+ uint32_t a5_host_int;
+ uint32_t a5_host_int_clr;
+ uint32_t a5_host_int_status;
+ uint32_t a5_host_int_set;
+ uint32_t host_a5_int;
+ uint32_t fw_version;
+ uint32_t init_req;
+ uint32_t init_response;
+ uint32_t shared_mem_ptr;
+ uint32_t shared_mem_size;
+ uint32_t qtbl_ptr;
+ uint32_t uncached_heap_ptr;
+ uint32_t uncached_heap_size;
+ uint32_t a5_status;
+};
+
+/**
+ * struct cam_a5_device_core_info
+ * @a5_hw_info: A5 hardware info
+ * @fw_elf: start address of fw start with elf header
+ * @fw: start address of fw blob
+ * @fw_buf: smmu alloc/mapped fw buffer
+ * @fw_buf_len: fw buffer length
+ * @query_cap: A5 query info from firmware
+ * @a5_acquire: Acquire information of A5
+ * @irq_cb: IRQ callback
+ * @cpas_handle: CPAS handle for A5
+ * @cpas_start: state variable for cpas
+ */
+struct cam_a5_device_core_info {
+ struct cam_a5_device_hw_info *a5_hw_info;
+ const struct firmware *fw_elf;
+ void *fw;
+ uint32_t fw_buf;
+ uintptr_t fw_kva_addr; /* kernel VA of fw buffer (not listed in kernel-doc above) */
+ uint64_t fw_buf_len;
+ struct cam_icp_a5_query_cap query_cap;
+ struct cam_icp_a5_acquire_dev a5_acquire[8];
+ struct cam_icp_a5_set_irq_cb irq_cb;
+ uint32_t cpas_handle;
+ bool cpas_start;
+};
+
+int cam_a5_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_a5_deinit_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_a5_irq(int irq_num, void *data); /* top-half handler registered via soc util */
+#endif /* CAM_A5_CORE_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_dev.c
new file mode 100644
index 000000000000..3b652e72466e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_dev.c
@@ -0,0 +1,235 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "a5_core.h"
+#include "a5_soc.h"
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+struct a5_soc_info cam_a5_soc_info;
+EXPORT_SYMBOL(cam_a5_soc_info);
+
+struct cam_a5_device_hw_info cam_a5_hw_info = {
+ .hw_ver = 0x0,
+ .nsec_reset = 0x4,
+ .a5_control = 0x8,
+ .a5_host_int_en = 0x10,
+ .a5_host_int = 0x14,
+ .a5_host_int_clr = 0x18,
+ .a5_host_int_status = 0x1c,
+ .a5_host_int_set = 0x20,
+ .host_a5_int = 0x30,
+ .fw_version = 0x44,
+ .init_req = 0x48,
+ .init_response = 0x4c,
+ .shared_mem_ptr = 0x50,
+ .shared_mem_size = 0x54,
+ .qtbl_ptr = 0x58,
+ .uncached_heap_ptr = 0x5c,
+ .uncached_heap_size = 0x60,
+ .a5_status = 0x200,
+};
+EXPORT_SYMBOL(cam_a5_hw_info);
+
+static bool cam_a5_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
+{ /* CPAS IRQ callback: rate-limited logging of IPE/BPS UBWC encode/decode errors */
+ bool error_handled = false;
+
+ if (!irq_data)
+ return error_handled; /* nothing to report */
+
+ switch (irq_data->irq_type) {
+ case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ICP,
+ "IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+ irq_data->irq_type,
+ irq_data->u.dec_err.decerr_status.value,
+ irq_data->u.dec_err.decerr_status.thr_err,
+ irq_data->u.dec_err.decerr_status.fcl_err,
+ irq_data->u.dec_err.decerr_status.len_md_err,
+ irq_data->u.dec_err.decerr_status.format_err);
+ error_handled = true;
+ break;
+ case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ICP,
+ "IPE/BPS UBWC Encode error type=%d status=%x",
+ irq_data->irq_type,
+ irq_data->u.enc_err.encerr_status.value);
+ error_handled = true;
+ break;
+ default:
+ break; /* unrecognized irq types are left unhandled */
+ }
+
+ return error_handled;
+}
+
+int cam_a5_register_cpas(struct cam_hw_soc_info *soc_info,
+ struct cam_a5_device_core_info *core_info,
+ uint32_t hw_idx)
+{ /* register this A5 instance with CPAS and stash the client handle */
+ struct cam_cpas_register_params cpas_register_params; /* NOTE(review): not zero-initialized; unset fields hold stack garbage — confirm CPAS ignores them */
+ int rc;
+
+ cpas_register_params.dev = &soc_info->pdev->dev;
+ memcpy(cpas_register_params.identifier, "icp", sizeof("icp"));
+ cpas_register_params.cam_cpas_client_cb = cam_a5_cpas_cb;
+ cpas_register_params.cell_index = hw_idx;
+ cpas_register_params.userdata = NULL;
+
+ rc = cam_cpas_register_client(&cpas_register_params);
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "failed: %d", rc);
+ return rc;
+ }
+
+ core_info->cpas_handle = cpas_register_params.client_handle; /* handle used for all later CPAS calls */
+ return rc;
+}
+
+int cam_a5_probe(struct platform_device *pdev)
+{ /* bind A5 hw: alloc intf/core info, resolve DT hw info, init soc resources and CPAS */
+ int rc = 0;
+ struct cam_hw_info *a5_dev = NULL;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ const struct of_device_id *match_dev = NULL;
+ struct cam_a5_device_core_info *core_info = NULL;
+ struct cam_a5_device_hw_info *hw_info = NULL;
+
+ a5_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+ if (!a5_dev_intf)
+ return -ENOMEM;
+
+ of_property_read_u32(pdev->dev.of_node,
+ "cell-index", &a5_dev_intf->hw_idx); /* return unchecked: hw_idx stays 0 (kzalloc) if property absent */
+
+ a5_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!a5_dev) {
+ rc = -ENOMEM;
+ goto a5_dev_alloc_failure;
+ }
+
+ a5_dev->soc_info.pdev = pdev;
+ a5_dev->soc_info.dev = &pdev->dev;
+ a5_dev->soc_info.dev_name = pdev->name;
+ a5_dev_intf->hw_priv = a5_dev;
+ a5_dev_intf->hw_ops.init = cam_a5_init_hw;
+ a5_dev_intf->hw_ops.deinit = cam_a5_deinit_hw;
+ a5_dev_intf->hw_ops.process_cmd = cam_a5_process_cmd;
+ a5_dev_intf->hw_type = CAM_ICP_DEV_A5;
+
+ CAM_DBG(CAM_ICP, "type %d index %d",
+ a5_dev_intf->hw_type,
+ a5_dev_intf->hw_idx);
+
+ platform_set_drvdata(pdev, a5_dev_intf);
+
+ a5_dev->core_info = kzalloc(sizeof(struct cam_a5_device_core_info),
+ GFP_KERNEL);
+ if (!a5_dev->core_info) {
+ rc = -ENOMEM;
+ goto core_info_alloc_failure;
+ }
+ core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev) {
+ CAM_ERR(CAM_ICP, "No a5 hardware info");
+ rc = -EINVAL;
+ goto match_err;
+ }
+ hw_info = (struct cam_a5_device_hw_info *)match_dev->data; /* register offset table from the OF match entry */
+ core_info->a5_hw_info = hw_info;
+
+ a5_dev->soc_info.soc_private = &cam_a5_soc_info; /* file-global singleton: assumes a single A5 instance */
+
+ rc = cam_a5_init_soc_resources(&a5_dev->soc_info, cam_a5_irq,
+ a5_dev);
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "failed to init_soc");
+ goto init_soc_failure;
+ }
+
+ CAM_DBG(CAM_ICP, "soc info : %pK",
+ (void *)&a5_dev->soc_info);
+ rc = cam_a5_register_cpas(&a5_dev->soc_info,
+ core_info, a5_dev_intf->hw_idx);
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "a5 cpas registration failed");
+ goto cpas_reg_failed;
+ }
+ a5_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+ mutex_init(&a5_dev->hw_mutex);
+ spin_lock_init(&a5_dev->hw_lock);
+ init_completion(&a5_dev->hw_complete);
+
+ CAM_DBG(CAM_ICP, "A5%d probe successful",
+ a5_dev_intf->hw_idx);
+ return 0;
+
+cpas_reg_failed: /* labels unwind allocations in reverse order of setup */
+init_soc_failure:
+match_err:
+ kfree(a5_dev->core_info);
+core_info_alloc_failure:
+ kfree(a5_dev);
+a5_dev_alloc_failure:
+ kfree(a5_dev_intf);
+
+ return rc;
+}
+
+static const struct of_device_id cam_a5_dt_match[] = {
+ {
+ .compatible = "qcom,cam-a5",
+ .data = &cam_a5_hw_info, /* register offset table handed to probe via of_match_device */
+ },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, cam_a5_dt_match);
+
+static struct platform_driver cam_a5_driver = {
+ .probe = cam_a5_probe,
+ .driver = {
+ .name = "cam-a5",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_a5_dt_match,
+ .suppress_bind_attrs = true, /* disallow manual bind/unbind via sysfs */
+ },
+};
+
+static int __init cam_a5_init_module(void)
+{
+ return platform_driver_register(&cam_a5_driver);
+}
+
+static void __exit cam_a5_exit_module(void)
+{
+ platform_driver_unregister(&cam_a5_driver);
+}
+
+module_init(cam_a5_init_module);
+module_exit(cam_a5_exit_module);
+MODULE_DESCRIPTION("CAM A5 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_soc.c
new file mode 100644
index 000000000000..a439971d81b4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_soc.c
@@ -0,0 +1,120 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "a5_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{ /* parse A5 DT: common soc props, firmware name and ubwc-cfg values */
+ int rc = 0, i;
+ const char *fw_name;
+ struct a5_soc_info *camp_a5_soc_info;
+ struct device_node *of_node = NULL;
+ struct platform_device *pdev = NULL;
+ int num_ubwc_cfg;
+
+ pdev = soc_info->pdev;
+ of_node = pdev->dev.of_node;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "get a5 dt prop is failed");
+ return rc;
+ }
+
+ camp_a5_soc_info = soc_info->soc_private;
+ fw_name = camp_a5_soc_info->fw_name;
+
+ rc = of_property_read_string(of_node, "fw_name", &fw_name); /* NOTE(review): result lands only in the local 'fw_name'; camp_a5_soc_info->fw_name is never updated — confirm intended */
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "fw_name read failed");
+ goto end;
+ }
+
+ num_ubwc_cfg = of_property_count_u32_elems(of_node, "ubwc-cfg");
+ if ((num_ubwc_cfg < 0) || (num_ubwc_cfg > ICP_UBWC_MAX)) {
+ CAM_ERR(CAM_ICP, "wrong ubwc_cfg: %d", num_ubwc_cfg);
+ rc = num_ubwc_cfg; /* NOTE(review): positive when count > ICP_UBWC_MAX, so callers checking rc < 0 treat it as success — confirm */
+ goto end;
+ }
+
+ for (i = 0; i < num_ubwc_cfg; i++) {
+ rc = of_property_read_u32_index(of_node, "ubwc-cfg",
+ i, &camp_a5_soc_info->ubwc_cfg[i]);
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "unable to read ubwc cfg values");
+ break;
+ }
+ }
+
+end:
+ return rc;
+}
+
+static int cam_a5_request_platform_resource(
+ struct cam_hw_soc_info *soc_info,
+ irq_handler_t a5_irq_handler, void *irq_data)
+{ /* thin wrapper: request clocks/regulators/irq through the common soc util */
+ int rc = 0;
+
+ rc = cam_soc_util_request_platform_resource(soc_info, a5_irq_handler,
+ irq_data);
+
+ return rc;
+}
+
+int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t a5_irq_handler, void *irq_data)
+{ /* one-time soc setup: DT parse, then platform resource acquisition */
+ int rc = 0;
+
+ rc = cam_a5_get_dt_properties(soc_info);
+ if (rc < 0)
+ return rc;
+
+ rc = cam_a5_request_platform_resource(soc_info, a5_irq_handler,
+ irq_data);
+ if (rc < 0)
+ return rc;
+
+ return rc;
+}
+
+int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{ /* power on regulators/clocks at SVS level; last arg true enables the irq */
+ int rc = 0;
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_SVS_VOTE, true);
+ if (rc)
+ CAM_ERR(CAM_ICP, "enable platform failed");
+
+ return rc;
+}
+
+int cam_a5_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{ /* counterpart of enable: drop clocks and the irq */
+ int rc = 0;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc)
+ CAM_ERR(CAM_ICP, "disable platform failed");
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_soc.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_soc.h
new file mode 100644
index 000000000000..744ff56a330e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/a5_soc.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_A5_SOC_H
+#define CAM_A5_SOC_H
+
+#include "cam_soc_util.h"
+
+#define ICP_UBWC_MAX 2 /* max entries accepted from the DT "ubwc-cfg" property */
+
+struct a5_soc_info {
+ char *fw_name; /* NOTE(review): never written by cam_a5_get_dt_properties (reads into a local) — confirm who populates this */
+ uint32_t ubwc_cfg[ICP_UBWC_MAX]; /* raw ubwc config words parsed from DT */
+};
+
+int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t a5_irq_handler, void *irq_data);
+
+int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_a5_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/Makefile
new file mode 100644
index 000000000000..de5c3b6164d0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_core.c
new file mode 100644
index 000000000000..b969c92ccbc8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -0,0 +1,405 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "bps_core.h"
+#include "bps_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_bps_hw_intf.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "hfi_reg.h"
+
+#define HFI_MAX_POLL_TRY 5
+
+static int cam_bps_cpas_vote(struct cam_bps_device_core_info *core_info,
+ struct cam_icp_cpas_vote *cpas_vote)
+{ /* forward whichever of the AHB/AXI votes is flagged valid to CPAS */
+ int rc = 0;
+
+ if (cpas_vote->ahb_vote_valid)
+ rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+ &cpas_vote->ahb_vote);
+ if (cpas_vote->axi_vote_valid) /* NOTE(review): rc from the AHB vote is overwritten below; an AHB failure can be masked by an AXI success — confirm */
+ rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+ &cpas_vote->axi_vote);
+
+ if (rc < 0)
+ CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
+
+ return rc;
+}
+
+
+int cam_bps_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size)
+{ /* power up BPS: default CPAS AHB/AXI vote + start, then enable soc resources */
+ struct cam_hw_info *bps_dev = device_priv;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_bps_device_core_info *core_info = NULL;
+ struct cam_icp_cpas_vote cpas_vote;
+ int rc = 0;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+ return -EINVAL;
+ }
+
+ soc_info = &bps_dev->soc_info;
+ core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+
+ if ((!soc_info) || (!core_info)) {
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
+ return -EINVAL;
+ }
+
+ cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+ cpas_vote.axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ cpas_vote.axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+ rc = cam_cpas_start(core_info->cpas_handle,
+ &cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
+ return rc;
+ }
+ core_info->cpas_start = true;
+
+ rc = cam_bps_enable_soc_resources(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc); /* unwind the CPAS start taken above */
+ if (cam_cpas_stop(core_info->cpas_handle))
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
+ else
+ core_info->cpas_start = false;
+ } else {
+ core_info->clk_enable = true;
+ }
+
+ return rc;
+}
+
+int cam_bps_deinit_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size)
+{ /* power down BPS: disable soc resources (clk only if still enabled), then stop CPAS */
+ struct cam_hw_info *bps_dev = device_priv;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_bps_device_core_info *core_info = NULL;
+ int rc = 0;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+ return -EINVAL;
+ }
+
+ soc_info = &bps_dev->soc_info;
+ core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+ if ((!soc_info) || (!core_info)) {
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
+ return -EINVAL;
+ }
+
+ rc = cam_bps_disable_soc_resources(soc_info, core_info->clk_enable);
+ if (rc)
+ CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc); /* best effort: continue teardown despite failure */
+ core_info->clk_enable = false;
+
+ if (core_info->cpas_start) {
+ if (cam_cpas_stop(core_info->cpas_handle))
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
+ else
+ core_info->cpas_start = false;
+ }
+
+ return rc;
+}
+
+static int cam_bps_handle_pc(struct cam_hw_info *bps_dev)
+{ /* request BPS power collapse via the CPASTOP pwr_ctrl register */
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_bps_device_core_info *core_info = NULL;
+ struct cam_bps_device_hw_info *hw_info = NULL;
+ int pwr_ctrl;
+ int pwr_status;
+
+ soc_info = &bps_dev->soc_info;
+ core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+ hw_info = core_info->bps_hw_info;
+
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
+ true, &pwr_ctrl);
+ if (!(pwr_ctrl & BPS_COLLAPSE_MASK)) { /* only act when collapse is not already requested */
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+ true, &pwr_status);
+
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0x1);
+
+ if ((pwr_status >> BPS_PWR_ON_MASK)) { /* NOTE(review): shifts by the mask VALUE (2) instead of AND-ing — confirm intended */
+ CAM_ERR(CAM_ICP, "BPS: pwr_status(%x):pwr_ctrl(%x)",
+ pwr_status, pwr_ctrl);
+ return -EINVAL;
+ }
+ }
+ cam_bps_get_gdsc_control(soc_info);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true,
+ &pwr_ctrl);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+ true, &pwr_status);
+ CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
+ pwr_ctrl, pwr_status);
+
+ return 0;
+}
+
+static int cam_bps_handle_resume(struct cam_hw_info *bps_dev)
+{ /* undo power collapse: clear pwr_ctrl collapse bit, hand GDSC control back */
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_bps_device_core_info *core_info = NULL;
+ struct cam_bps_device_hw_info *hw_info = NULL;
+ int pwr_ctrl;
+ int pwr_status;
+ int rc = 0;
+
+ soc_info = &bps_dev->soc_info;
+ core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+ hw_info = core_info->bps_hw_info;
+
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
+ if (pwr_ctrl & BPS_COLLAPSE_MASK) {
+ CAM_DBG(CAM_ICP, "BPS: pwr_ctrl set(%x)", pwr_ctrl);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0);
+ }
+
+ rc = cam_bps_transfer_gdsc_control(soc_info);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_status, true, &pwr_status);
+ CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
+ pwr_ctrl, pwr_status);
+
+ return rc;
+}
+
+static int cam_bps_cmd_reset(struct cam_hw_soc_info *soc_info,
+ struct cam_bps_device_core_info *core_info)
+{ /* reset BPS CDM then BPS top, polling each RST_DONE irq status bit */
+ uint32_t retry_cnt = 0;
+ uint32_t status = 0;
+ int pwr_ctrl, pwr_status, rc = 0;
+ bool reset_bps_cdm_fail = false;
+ bool reset_bps_top_fail = false;
+
+ CAM_DBG(CAM_ICP, "CAM_ICP_BPS_CMD_RESET");
+ /* Reset BPS CDM core*/
+ cam_io_w_mb((uint32_t)0xF,
+ soc_info->reg_map[0].mem_base + BPS_CDM_RST_CMD);
+ while (retry_cnt < HFI_MAX_POLL_TRY) {
+ readw_poll_timeout((soc_info->reg_map[0].mem_base +
+ BPS_CDM_IRQ_STATUS),
+ status, ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1),
+ 100, 10000); /* poll every 100us, 10ms timeout; NOTE(review): readw is a 16-bit read — confirm the status bit fits */
+
+ CAM_DBG(CAM_ICP, "bps_cdm_irq_status = %u", status);
+
+ if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1)
+ break;
+ retry_cnt++;
+ }
+ status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ BPS_CDM_IRQ_STATUS);
+ if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
+ CAM_ERR(CAM_ICP, "BPS CDM rst failed status 0x%x", status);
+ reset_bps_cdm_fail = true;
+ }
+
+ /* Reset BPS core*/
+ status = 0;
+ cam_io_w_mb((uint32_t)0x3,
+ soc_info->reg_map[0].mem_base + BPS_TOP_RST_CMD);
+ while (retry_cnt < HFI_MAX_POLL_TRY) { /* NOTE(review): retry_cnt not reset after the CDM loop, so this loop may get fewer (or zero) iterations — confirm */
+ readw_poll_timeout((soc_info->reg_map[0].mem_base +
+ BPS_TOP_IRQ_STATUS),
+ status, ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1),
+ 100, 10000);
+
+ CAM_DBG(CAM_ICP, "bps_top_irq_status = %u", status);
+
+ if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1)
+ break;
+ retry_cnt++;
+ }
+ status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ BPS_TOP_IRQ_STATUS);
+ if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
+ CAM_ERR(CAM_ICP, "BPS top rst failed status 0x%x", status);
+ reset_bps_top_fail = true;
+ }
+
+ cam_bps_get_gdsc_control(soc_info);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, core_info->bps_hw_info->pwr_ctrl,
+ true, &pwr_ctrl);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, core_info->bps_hw_info->pwr_status,
+ true, &pwr_status);
+ CAM_DBG(CAM_ICP, "(After) pwr_ctrl = %x pwr_status = %x",
+ pwr_ctrl, pwr_status);
+
+ if (reset_bps_cdm_fail || reset_bps_top_fail)
+ rc = -EAGAIN; /* caller may retry the reset */
+
+ return rc;
+}
+
+int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size)
+{ /* dispatch ICP BPS control commands (CPAS votes, power collapse/resume, clk, reset) */
+ struct cam_hw_info *bps_dev = device_priv;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_bps_device_core_info *core_info = NULL;
+ struct cam_bps_device_hw_info *hw_info = NULL;
+ int rc = 0;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_ICP, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ if (cmd_type >= CAM_ICP_BPS_CMD_MAX) {
+ CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
+ return -EINVAL;
+ }
+
+ soc_info = &bps_dev->soc_info;
+ core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+ hw_info = core_info->bps_hw_info;
+
+ switch (cmd_type) {
+ case CAM_ICP_BPS_CMD_VOTE_CPAS: {
+ struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+ if (!cmd_args) {
+ CAM_ERR(CAM_ICP, "cmd args NULL");
+ return -EINVAL;
+ }
+
+ cam_bps_cpas_vote(core_info, cpas_vote); /* return value intentionally ignored (best effort) */
+ break;
+ }
+
+ case CAM_ICP_BPS_CMD_CPAS_START: {
+ struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+ if (!cmd_args) {
+ CAM_ERR(CAM_ICP, "cmd args NULL");
+ return -EINVAL;
+ }
+
+ if (!core_info->cpas_start) { /* idempotent: skip if already started */
+ rc = cam_cpas_start(core_info->cpas_handle,
+ &cpas_vote->ahb_vote,
+ &cpas_vote->axi_vote);
+ core_info->cpas_start = true;
+ }
+ break;
+ }
+
+ case CAM_ICP_BPS_CMD_CPAS_STOP:
+ if (core_info->cpas_start) {
+ cam_cpas_stop(core_info->cpas_handle);
+ core_info->cpas_start = false;
+ }
+ break;
+ case CAM_ICP_BPS_CMD_POWER_COLLAPSE:
+ rc = cam_bps_handle_pc(bps_dev);
+ break;
+ case CAM_ICP_BPS_CMD_POWER_RESUME:
+ rc = cam_bps_handle_resume(bps_dev);
+ break;
+ case CAM_ICP_BPS_CMD_UPDATE_CLK: {
+ struct cam_a5_clk_update_cmd *clk_upd_cmd =
+ (struct cam_a5_clk_update_cmd *)cmd_args; /* NOTE(review): no NULL check on cmd_args here, unlike the CPAS cases — confirm callers always pass args */
+ uint32_t clk_rate = clk_upd_cmd->curr_clk_rate;
+
+ CAM_DBG(CAM_ICP, "bps_src_clk rate = %d", (int)clk_rate);
+
+ if (!core_info->clk_enable) {
+ if (clk_upd_cmd->ipe_bps_pc_enable) { /* collapse/resume around clk enable when PC is in use */
+ cam_bps_handle_pc(bps_dev);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0x0);
+ }
+ rc = cam_bps_toggle_clk(soc_info, true);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Enable failed");
+ else
+ core_info->clk_enable = true;
+ if (clk_upd_cmd->ipe_bps_pc_enable) {
+ rc = cam_bps_handle_resume(bps_dev);
+ if (rc)
+ CAM_ERR(CAM_ICP, "BPS resume failed");
+ }
+ }
+ CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
+ rc = cam_bps_update_clk_rate(soc_info, clk_rate);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to update clk");
+ }
+ break;
+ case CAM_ICP_BPS_CMD_DISABLE_CLK:
+ if (core_info->clk_enable == true)
+ cam_bps_toggle_clk(soc_info, false);
+ core_info->clk_enable = false;
+ break;
+ case CAM_ICP_BPS_CMD_RESET:
+ rc = cam_bps_cmd_reset(soc_info, core_info);
+ break;
+ default:
+ CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type);
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+irqreturn_t cam_bps_irq(int irq_num, void *data)
+{ /* stub: BPS raises no irqs the driver acts on; just acknowledge */
+ return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_core.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_core.h
new file mode 100644
index 000000000000..05a769555f9f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_core.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_BPS_CORE_H
+#define CAM_BPS_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+#define BPS_COLLAPSE_MASK 0x1 /* pwr_ctrl bit requesting power collapse */
+#define BPS_PWR_ON_MASK 0x2 /* pwr_status power-on indication */
+
+struct cam_bps_device_hw_info { /* CPASTOP register offsets for BPS power control */
+ uint32_t hw_idx;
+ uint32_t pwr_ctrl;
+ uint32_t pwr_status;
+ uint32_t reserved;
+};
+
+struct cam_bps_device_core_info { /* per-instance runtime state */
+ struct cam_bps_device_hw_info *bps_hw_info;
+ uint32_t cpas_handle; /* CPAS client handle from registration */
+ bool cpas_start; /* true while CPAS is started */
+ bool clk_enable; /* true while BPS clocks are on */
+};
+
+int cam_bps_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_bps_deinit_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_bps_irq(int irq_num, void *data);
+#endif /* CAM_BPS_CORE_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_dev.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_dev.c
new file mode 100644
index 000000000000..56abb4b8e6e9
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_dev.c
@@ -0,0 +1,214 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "bps_core.h"
+#include "bps_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_bps_device_hw_info cam_bps_hw_info = {
+ .hw_idx = 0,
+ .pwr_ctrl = 0x5c,
+ .pwr_status = 0x58,
+ .reserved = 0,
+};
+EXPORT_SYMBOL(cam_bps_hw_info); /* NOTE(review): EXPORT_SYMBOL on a static symbol — confirm this passes modpost/link */
+
+static char bps_dev_name[8]; /* NOTE(review): single file-global buffer shared by all probed instances */
+
+static bool cam_bps_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
+{ /* CPAS IRQ callback: rate-limited logging of IPE/BPS UBWC errors */
+ bool error_handled = false;
+
+ if (!irq_data)
+ return error_handled;
+
+ switch (irq_data->irq_type) {
+ case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ICP,
+ "IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+ irq_data->irq_type,
+ irq_data->u.dec_err.decerr_status.value,
+ irq_data->u.dec_err.decerr_status.thr_err,
+ irq_data->u.dec_err.decerr_status.fcl_err,
+ irq_data->u.dec_err.decerr_status.len_md_err,
+ irq_data->u.dec_err.decerr_status.format_err);
+ error_handled = true;
+ break;
+ case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ICP,
+ "IPE/BPS UBWC Encode error type=%d status=%x",
+ irq_data->irq_type,
+ irq_data->u.enc_err.encerr_status.value);
+ error_handled = true;
+ break;
+ default:
+ break;
+ }
+
+ return error_handled;
+}
+
+int cam_bps_register_cpas(struct cam_hw_soc_info *soc_info,
+ struct cam_bps_device_core_info *core_info,
+ uint32_t hw_idx)
+{ /* register this BPS instance with CPAS and stash the client handle */
+ struct cam_cpas_register_params cpas_register_params; /* NOTE(review): not zero-initialized; unset fields hold stack garbage — confirm CPAS ignores them */
+ int rc;
+
+ cpas_register_params.dev = &soc_info->pdev->dev;
+ memcpy(cpas_register_params.identifier, "bps", sizeof("bps"));
+ cpas_register_params.cam_cpas_client_cb = cam_bps_cpas_cb;
+ cpas_register_params.cell_index = hw_idx;
+ cpas_register_params.userdata = NULL;
+
+ rc = cam_cpas_register_client(&cpas_register_params);
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "failed: %d", rc);
+ return rc;
+ }
+ core_info->cpas_handle = cpas_register_params.client_handle;
+
+ return rc;
+}
+
+int cam_bps_probe(struct platform_device *pdev)
+{ /* bind BPS hw: alloc intf/core info, init soc resources and CPAS; error paths free in full (repeated kfree chains rather than the goto style used by cam_a5_probe) */
+ struct cam_hw_info *bps_dev = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+ const struct of_device_id *match_dev = NULL;
+ struct cam_bps_device_core_info *core_info = NULL;
+ struct cam_bps_device_hw_info *hw_info = NULL;
+ int rc = 0;
+
+ bps_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+ if (!bps_dev_intf)
+ return -ENOMEM;
+
+ of_property_read_u32(pdev->dev.of_node,
+ "cell-index", &bps_dev_intf->hw_idx); /* return unchecked: hw_idx stays 0 (kzalloc) if property absent */
+
+ bps_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!bps_dev) {
+ kfree(bps_dev_intf);
+ return -ENOMEM;
+ }
+
+ memset(bps_dev_name, 0, sizeof(bps_dev_name));
+ snprintf(bps_dev_name, sizeof(bps_dev_name),
+ "bps%1u", bps_dev_intf->hw_idx); /* NOTE(review): file-global buffer — a second BPS instance would overwrite the first's name */
+
+ bps_dev->soc_info.pdev = pdev;
+ bps_dev->soc_info.dev = &pdev->dev;
+ bps_dev->soc_info.dev_name = bps_dev_name;
+ bps_dev_intf->hw_priv = bps_dev;
+ bps_dev_intf->hw_ops.init = cam_bps_init_hw;
+ bps_dev_intf->hw_ops.deinit = cam_bps_deinit_hw;
+ bps_dev_intf->hw_ops.process_cmd = cam_bps_process_cmd;
+ bps_dev_intf->hw_type = CAM_ICP_DEV_BPS;
+ platform_set_drvdata(pdev, bps_dev_intf);
+ bps_dev->core_info = kzalloc(sizeof(struct cam_bps_device_core_info),
+ GFP_KERNEL);
+ if (!bps_dev->core_info) {
+ kfree(bps_dev);
+ kfree(bps_dev_intf);
+ return -ENOMEM;
+ }
+ core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev) {
+ CAM_ERR(CAM_ICP, "No bps hardware info");
+ kfree(bps_dev->core_info);
+ kfree(bps_dev);
+ kfree(bps_dev_intf);
+ rc = -EINVAL;
+ return rc;
+ }
+ hw_info = &cam_bps_hw_info; /* match_dev->data ignored; the file-static table is used directly */
+ core_info->bps_hw_info = hw_info;
+
+ rc = cam_bps_init_soc_resources(&bps_dev->soc_info, cam_bps_irq,
+ bps_dev);
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "failed to init_soc");
+ kfree(bps_dev->core_info);
+ kfree(bps_dev);
+ kfree(bps_dev_intf);
+ return rc;
+ }
+ CAM_DBG(CAM_ICP, "soc info : %pK",
+ (void *)&bps_dev->soc_info);
+
+ rc = cam_bps_register_cpas(&bps_dev->soc_info,
+ core_info, bps_dev_intf->hw_idx);
+ if (rc < 0) {
+ kfree(bps_dev->core_info);
+ kfree(bps_dev);
+ kfree(bps_dev_intf);
+ return rc;
+ }
+ bps_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+ mutex_init(&bps_dev->hw_mutex);
+ spin_lock_init(&bps_dev->hw_lock);
+ init_completion(&bps_dev->hw_complete);
+ CAM_DBG(CAM_ICP, "BPS%d probe successful",
+ bps_dev_intf->hw_idx);
+
+ return rc;
+}
+
+static const struct of_device_id cam_bps_dt_match[] = {
+ {
+ .compatible = "qcom,cam-bps",
+ .data = &cam_bps_hw_info, /* note: probe ignores this and uses the static table directly */
+ },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, cam_bps_dt_match);
+
+static struct platform_driver cam_bps_driver = {
+ .probe = cam_bps_probe,
+ .driver = {
+ .name = "cam-bps",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_bps_dt_match,
+ .suppress_bind_attrs = true, /* disallow manual bind/unbind via sysfs */
+ },
+};
+
+static int __init cam_bps_init_module(void)
+{
+ return platform_driver_register(&cam_bps_driver);
+}
+
+static void __exit cam_bps_exit_module(void)
+{
+ platform_driver_unregister(&cam_bps_driver);
+}
+
+module_init(cam_bps_init_module);
+module_exit(cam_bps_exit_module);
+MODULE_DESCRIPTION("CAM BPS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_soc.c
new file mode 100644
index 000000000000..1da99a4d960d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -0,0 +1,170 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "bps_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+static int cam_bps_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc < 0)
+ CAM_ERR(CAM_ICP, "get bps dt prop is failed");
+
+ return rc;
+}
+
+static int cam_bps_request_platform_resource(
+ struct cam_hw_soc_info *soc_info,
+ irq_handler_t bps_irq_handler, void *irq_data)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_request_platform_resource(soc_info, bps_irq_handler,
+ irq_data);
+
+ return rc;
+}
+
+int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t bps_irq_handler, void *irq_data)
+{
+ int rc = 0;
+
+ rc = cam_bps_get_dt_properties(soc_info);
+ if (rc < 0)
+ return rc;
+
+ rc = cam_bps_request_platform_resource(soc_info, bps_irq_handler,
+ irq_data);
+ if (rc < 0)
+ return rc;
+
+ return rc;
+}
+
+int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_SVS_VOTE, false);
+ if (rc)
+ CAM_ERR(CAM_ICP, "enable platform failed");
+
+ return rc;
+}
+
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+ bool disable_clk)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
+ false);
+ if (rc)
+ CAM_ERR(CAM_ICP, "disable platform failed");
+
+ return rc;
+}
+
+int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info)
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < soc_info->num_rgltr; i++) {
+ rc = regulator_set_mode(soc_info->rgltr[i],
+ REGULATOR_MODE_FAST);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
+ soc_info->rgltr_name[i]);
+ goto rgltr_set_mode_failed;
+ }
+ }
+ return 0;
+
+rgltr_set_mode_failed:
+ for (i = i - 1; i >= 0; i--)
+ if (soc_info->rgltr[i])
+ regulator_set_mode(soc_info->rgltr[i],
+ REGULATOR_MODE_NORMAL);
+
+ return rc;
+}
+
+int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info)
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < soc_info->num_rgltr; i++) {
+ rc = regulator_set_mode(soc_info->rgltr[i],
+ REGULATOR_MODE_NORMAL);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
+ soc_info->rgltr_name[i]);
+ goto rgltr_set_mode_failed;
+ }
+ }
+ return 0;
+
+rgltr_set_mode_failed:
+ for (i = i - 1; i >= 0; i--)
+ if (soc_info->rgltr[i])
+ regulator_set_mode(soc_info->rgltr[i],
+ REGULATOR_MODE_FAST);
+
+ return rc;
+}
+
+int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
+ uint32_t clk_rate)
+{
+ int32_t src_clk_idx;
+
+ if (!soc_info)
+ return -EINVAL;
+
+ src_clk_idx = soc_info->src_clk_idx;
+
+ if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
+ (soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+ (clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+ CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d",
+ clk_rate,
+ soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+ clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+ }
+
+ return cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
+}
+
+int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
+{
+ int rc = 0;
+
+ if (clk_enable)
+ rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+ else
+ cam_soc_util_clk_disable_default(soc_info);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_soc.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_soc.h
new file mode 100644
index 000000000000..8a670121d0d9
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/bps_hw/bps_soc.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_BPS_SOC_H_
+#define _CAM_BPS_SOC_H_
+
+#include "cam_soc_util.h"
+
+int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t bps_irq_handler, void *irq_data);
+
+int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+ bool disable_clk);
+
+int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info);
+
+int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info);
+
+int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
+ uint32_t clk_rate);
+int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
+#endif /* _CAM_BPS_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/Makefile b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/Makefile
new file mode 100644
index 000000000000..179a2c54837f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/fw_inc/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/a5_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
new file mode 100644
index 000000000000..73e42de3fa03
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -0,0 +1,5003 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include <media/cam_cpas.h>
+
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_bps_hw_intf.h"
+#include "cam_ipe_hw_intf.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "hfi_intf.h"
+#include "hfi_reg.h"
+#include "hfi_session_defs.h"
+#include "hfi_sys_defs.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "a5_core.h"
+#include "hfi_sys_defs.h"
+#include "cam_debug_util.h"
+#include "cam_soc_util.h"
+#include "cam_trace.h"
+#include "cam_cpas_api.h"
+#include "cam_common_util.h"
+
+#define ICP_WORKQ_TASK_CMD_TYPE 1
+#define ICP_WORKQ_TASK_MSG_TYPE 2
+
+#define ICP_DEV_TYPE_TO_CLK_TYPE(dev_type) \
+ ((dev_type == CAM_ICP_RES_TYPE_BPS) ? ICP_CLK_HW_BPS : ICP_CLK_HW_IPE)
+
+#define ICP_DEVICE_IDLE_TIMEOUT 400
+
+static struct cam_icp_hw_mgr icp_hw_mgr;
+
+static int cam_icp_send_ubwc_cfg(struct cam_icp_hw_mgr *hw_mgr)
+{
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ int rc;
+
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
+ return -EINVAL;
+ }
+
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv,
+ CAM_ICP_A5_CMD_UBWC_CFG, NULL, 0);
+ if (rc)
+ CAM_ERR(CAM_ICP, "CAM_ICP_A5_CMD_UBWC_CFG is failed");
+
+ return rc;
+}
+
+static void cam_icp_hw_mgr_clk_info_update(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ struct cam_icp_clk_info *hw_mgr_clk_info;
+
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+ else
+ hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+
+ if (hw_mgr_clk_info->base_clk >= ctx_data->clk_info.base_clk)
+ hw_mgr_clk_info->base_clk -= ctx_data->clk_info.base_clk;
+}
+
+static void cam_icp_hw_mgr_reset_clk_info(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int i;
+
+ for (i = 0; i < ICP_CLK_HW_MAX; i++) {
+ hw_mgr->clk_info[i].base_clk = 0;
+ hw_mgr->clk_info[i].curr_clk = ICP_CLK_SVS_HZ;
+ hw_mgr->clk_info[i].threshold = ICP_OVER_CLK_THRESHOLD;
+ hw_mgr->clk_info[i].over_clked = 0;
+ hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ }
+ hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
+}
+
+static int cam_icp_get_actual_clk_rate_idx(
+ struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+ int i;
+
+ for (i = 0; i < CAM_MAX_VOTE; i++)
+ if (ctx_data->clk_info.clk_rate[i] >= base_clk)
+ return i;
+
+ /*
+ * Caller has to ensure returned index is within array
+ * size bounds while accessing that index.
+ */
+
+ return i;
+}
+
+static bool cam_icp_is_over_clk(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_icp_clk_info *hw_mgr_clk_info)
+{
+ int base_clk_idx;
+ int curr_clk_idx;
+
+ base_clk_idx = cam_icp_get_actual_clk_rate_idx(ctx_data,
+ hw_mgr_clk_info->base_clk);
+
+ curr_clk_idx = cam_icp_get_actual_clk_rate_idx(ctx_data,
+ hw_mgr_clk_info->curr_clk);
+
+ CAM_DBG(CAM_ICP, "bc_idx = %d cc_idx = %d %d %d",
+ base_clk_idx, curr_clk_idx, hw_mgr_clk_info->base_clk,
+ hw_mgr_clk_info->curr_clk);
+
+ if (curr_clk_idx > base_clk_idx)
+ return true;
+
+ return false;
+}
+
+static int cam_icp_get_lower_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+ int i;
+
+ i = cam_icp_get_actual_clk_rate_idx(ctx_data, base_clk);
+
+ if (i > 0)
+ return ctx_data->clk_info.clk_rate[i - 1];
+
+ CAM_DBG(CAM_ICP, "Already clk at lower level");
+ return base_clk;
+}
+
+static int cam_icp_get_next_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+ int i;
+
+ i = cam_icp_get_actual_clk_rate_idx(ctx_data, base_clk);
+
+ if (i < CAM_MAX_VOTE - 1)
+ return ctx_data->clk_info.clk_rate[i + 1];
+
+ CAM_DBG(CAM_ICP, "Already clk at higher level");
+
+ return base_clk;
+}
+
+static int cam_icp_get_actual_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+ int i;
+
+ for (i = 0; i < CAM_MAX_VOTE; i++)
+ if (ctx_data->clk_info.clk_rate[i] >= base_clk)
+ return ctx_data->clk_info.clk_rate[i];
+
+ return base_clk;
+}
+
+static int cam_icp_supported_clk_rates(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int i;
+ struct cam_hw_soc_info *soc_info;
+ struct cam_hw_intf *dev_intf = NULL;
+ struct cam_hw_info *dev = NULL;
+
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ dev_intf = hw_mgr->bps_dev_intf;
+ else
+ dev_intf = hw_mgr->ipe0_dev_intf;
+
+ if (!dev_intf) {
+ CAM_ERR(CAM_ICP, "dev_intf is invalid");
+ return -EINVAL;
+ }
+ dev = (struct cam_hw_info *)dev_intf->hw_priv;
+ soc_info = &dev->soc_info;
+
+ for (i = 0; i < CAM_MAX_VOTE; i++) {
+ ctx_data->clk_info.clk_rate[i] =
+ soc_info->clk_rate[i][soc_info->src_clk_idx];
+ CAM_DBG(CAM_ICP, "clk_info[%d] = %d",
+ i, ctx_data->clk_info.clk_rate[i]);
+ }
+
+ return 0;
+}
+
+static int cam_icp_clk_idx_from_req_id(struct cam_icp_hw_ctx_data *ctx_data,
+ uint64_t req_id)
+{
+ struct hfi_frame_process_info *frame_process;
+ int i;
+
+ frame_process = &ctx_data->hfi_frame_process;
+
+ for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+ if (frame_process->request_id[i] == req_id)
+ return i;
+
+ return 0;
+}
+
+static int cam_icp_ctx_clk_info_init(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ ctx_data->clk_info.curr_fc = 0;
+ ctx_data->clk_info.base_clk = 0;
+ ctx_data->clk_info.uncompressed_bw = 0;
+ ctx_data->clk_info.compressed_bw = 0;
+ cam_icp_supported_clk_rates(&icp_hw_mgr, ctx_data);
+
+ return 0;
+}
+
+static bool cam_icp_frame_pending(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ return !bitmap_empty(ctx_data->hfi_frame_process.bitmap,
+ CAM_FRAME_CMD_MAX);
+}
+
+static int cam_icp_ctx_timer_reset(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ if (ctx_data && ctx_data->watch_dog) {
+ ctx_data->watch_dog_reset_counter++;
+ CAM_DBG(CAM_ICP, "reset timer : ctx_id = %d, counter=%d",
+ ctx_data->ctx_id, ctx_data->watch_dog_reset_counter);
+ crm_timer_reset(ctx_data->watch_dog);
+ }
+
+ return 0;
+}
+
+static void cam_icp_device_timer_reset(struct cam_icp_hw_mgr *hw_mgr,
+ int device_index)
+{
+ if ((device_index >= ICP_CLK_HW_MAX) || (!hw_mgr))
+ return;
+
+ if (hw_mgr->clk_info[device_index].watch_dog) {
+ CAM_DBG(CAM_ICP, "reset timer : device_index = %d",
+ device_index);
+ crm_timer_reset(hw_mgr->clk_info[device_index].watch_dog);
+ hw_mgr->clk_info[device_index].watch_dog_reset_counter++;
+ }
+}
+
+static int32_t cam_icp_deinit_idle_clk(void *priv, void *data)
+{
+ struct cam_icp_hw_mgr *hw_mgr = (struct cam_icp_hw_mgr *)priv;
+ struct clk_work_data *task_data = (struct clk_work_data *)data;
+ struct cam_icp_clk_info *clk_info =
+ (struct cam_icp_clk_info *)task_data->data;
+ uint32_t id;
+ uint32_t i;
+ struct cam_icp_hw_ctx_data *ctx_data;
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+ struct cam_hw_intf *dev_intf = NULL;
+ struct cam_a5_clk_update_cmd clk_upd_cmd;
+ int rc = 0;
+ bool busy = false;
+
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+
+ clk_info->base_clk = 0;
+ clk_info->curr_clk = 0;
+ clk_info->over_clked = 0;
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+ for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+ ctx_data = &hw_mgr->ctx_data[i];
+ mutex_lock(&ctx_data->ctx_mutex);
+ if ((ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED) &&
+ (ICP_DEV_TYPE_TO_CLK_TYPE(
+ ctx_data->icp_dev_acquire_info->dev_type)
+ == clk_info->hw_type)) {
+ busy = cam_icp_frame_pending(ctx_data);
+ if (busy) {
+ mutex_unlock(&ctx_data->ctx_mutex);
+ break;
+ }
+ cam_icp_ctx_clk_info_init(ctx_data);
+ }
+ mutex_unlock(&ctx_data->ctx_mutex);
+ }
+
+ if (busy) {
+ cam_icp_device_timer_reset(hw_mgr, clk_info->hw_type);
+ rc = -EBUSY;
+ goto done;
+ }
+
+ if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+ rc = -EINVAL;
+ goto done;
+ }
+
+ if (clk_info->hw_type == ICP_CLK_HW_BPS) {
+ dev_intf = bps_dev_intf;
+ id = CAM_ICP_BPS_CMD_DISABLE_CLK;
+ } else if (clk_info->hw_type == ICP_CLK_HW_IPE) {
+ dev_intf = ipe0_dev_intf;
+ id = CAM_ICP_IPE_CMD_DISABLE_CLK;
+ } else {
+ CAM_ERR(CAM_ICP, "Error");
+ goto done;
+ }
+
+ CAM_DBG(CAM_ICP, "Disable %d", clk_info->hw_type);
+
+ clk_upd_cmd.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag;
+
+ dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+ &clk_upd_cmd, sizeof(struct cam_a5_clk_update_cmd));
+
+ if (clk_info->hw_type != ICP_CLK_HW_BPS)
+ if (ipe1_dev_intf)
+ ipe1_dev_intf->hw_ops.process_cmd(
+ ipe1_dev_intf->hw_priv, id,
+ &clk_upd_cmd,
+ sizeof(struct cam_a5_clk_update_cmd));
+
+done:
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return rc;
+}
+
+static int32_t cam_icp_ctx_timer(void *priv, void *data)
+{
+ struct clk_work_data *task_data = (struct clk_work_data *)data;
+ struct cam_icp_hw_ctx_data *ctx_data =
+ (struct cam_icp_hw_ctx_data *)task_data->data;
+ struct cam_icp_hw_mgr *hw_mgr = &icp_hw_mgr;
+ uint32_t id;
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+ struct cam_hw_intf *dev_intf = NULL;
+ struct cam_icp_clk_info *clk_info;
+ struct cam_icp_cpas_vote clk_update;
+
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "ctx_data is NULL, failed to update clk");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx_data->ctx_mutex);
+ if ((ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) ||
+ (ctx_data->watch_dog_reset_counter == 0)) {
+ CAM_DBG(CAM_ICP, "state %d, counter=%d",
+ ctx_data->state, ctx_data->watch_dog_reset_counter);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return 0;
+ }
+
+ if (cam_icp_frame_pending(ctx_data)) {
+ cam_icp_ctx_timer_reset(ctx_data);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return -EBUSY;
+ }
+
+ CAM_DBG(CAM_ICP,
+ "E :ctx_id = %d ubw = %lld cbw = %lld curr_fc = %u bc = %u",
+ ctx_data->ctx_id,
+ ctx_data->clk_info.uncompressed_bw,
+ ctx_data->clk_info.compressed_bw,
+ ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk);
+
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+
+ if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return -EINVAL;
+ }
+
+ if (!ctx_data->icp_dev_acquire_info) {
+ CAM_WARN(CAM_ICP, "NULL acquire info");
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return -EINVAL;
+ }
+
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+ dev_intf = bps_dev_intf;
+ clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+ id = CAM_ICP_BPS_CMD_VOTE_CPAS;
+ } else {
+ dev_intf = ipe0_dev_intf;
+ clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+ id = CAM_ICP_IPE_CMD_VOTE_CPAS;
+ }
+
+ clk_info->compressed_bw -= ctx_data->clk_info.compressed_bw;
+ clk_info->uncompressed_bw -= ctx_data->clk_info.uncompressed_bw;
+ ctx_data->clk_info.uncompressed_bw = 0;
+ ctx_data->clk_info.compressed_bw = 0;
+ ctx_data->clk_info.curr_fc = 0;
+ ctx_data->clk_info.base_clk = 0;
+
+ clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
+ clk_update.ahb_vote.vote.freq = 0;
+ clk_update.ahb_vote_valid = false;
+ clk_update.axi_vote.compressed_bw = clk_info->compressed_bw;
+ clk_update.axi_vote.uncompressed_bw = clk_info->uncompressed_bw;
+ clk_update.axi_vote_valid = true;
+ dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+ &clk_update, sizeof(clk_update));
+
+ CAM_DBG(CAM_ICP,
+ "X :ctx_id = %d ubw = %lld cbw = %lld curr_fc = %u bc = %u",
+ ctx_data->ctx_id,
+ ctx_data->clk_info.uncompressed_bw,
+ ctx_data->clk_info.compressed_bw,
+ ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk);
+
+ mutex_unlock(&ctx_data->ctx_mutex);
+
+ return 0;
+}
+
+static void cam_icp_ctx_timer_cb(unsigned long data)
+{
+ unsigned long flags;
+ struct crm_workq_task *task;
+ struct clk_work_data *task_data;
+ struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+
+ spin_lock_irqsave(&icp_hw_mgr.hw_mgr_lock, flags);
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.timer_work);
+ if (!task) {
+ CAM_ERR(CAM_ICP, "no empty task");
+ spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+ return;
+ }
+
+ task_data = (struct clk_work_data *)task->payload;
+ task_data->data = timer->parent;
+ task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+ task->process_cb = cam_icp_ctx_timer;
+ cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+}
+
+static void cam_icp_device_timer_cb(unsigned long data)
+{
+ unsigned long flags;
+ struct crm_workq_task *task;
+ struct clk_work_data *task_data;
+ struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+
+ spin_lock_irqsave(&icp_hw_mgr.hw_mgr_lock, flags);
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.timer_work);
+ if (!task) {
+ CAM_ERR(CAM_ICP, "no empty task");
+ spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+ return;
+ }
+
+ task_data = (struct clk_work_data *)task->payload;
+ task_data->data = timer->parent;
+ task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+ task->process_cb = cam_icp_deinit_idle_clk;
+ cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+}
+
+static int cam_icp_clk_info_init(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int i;
+
+ for (i = 0; i < ICP_CLK_HW_MAX; i++) {
+ hw_mgr->clk_info[i].base_clk = ICP_CLK_SVS_HZ;
+ hw_mgr->clk_info[i].curr_clk = ICP_CLK_SVS_HZ;
+ hw_mgr->clk_info[i].threshold = ICP_OVER_CLK_THRESHOLD;
+ hw_mgr->clk_info[i].over_clked = 0;
+ hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ hw_mgr->clk_info[i].hw_type = i;
+ hw_mgr->clk_info[i].watch_dog_reset_counter = 0;
+ }
+ hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
+
+ return 0;
+}
+
+static int cam_icp_ctx_timer_start(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int rc = 0;
+
+ rc = crm_timer_init(&ctx_data->watch_dog,
+ 200, ctx_data, &cam_icp_ctx_timer_cb);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to start timer");
+
+ ctx_data->watch_dog_reset_counter = 0;
+
+ CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+ return rc;
+}
+
+static int cam_icp_device_timer_start(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < ICP_CLK_HW_MAX; i++) {
+ if (!hw_mgr->clk_info[i].watch_dog) {
+ rc = crm_timer_init(&hw_mgr->clk_info[i].watch_dog,
+ ICP_DEVICE_IDLE_TIMEOUT, &hw_mgr->clk_info[i],
+ &cam_icp_device_timer_cb);
+
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to start timer %d", i);
+
+ hw_mgr->clk_info[i].watch_dog_reset_counter = 0;
+ }
+ }
+
+ return rc;
+}
+
+static int cam_icp_ctx_timer_stop(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ if (ctx_data->watch_dog) {
+ CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+ ctx_data->watch_dog_reset_counter = 0;
+ crm_timer_exit(&ctx_data->watch_dog);
+ ctx_data->watch_dog = NULL;
+ }
+
+ return 0;
+}
+
+static void cam_icp_device_timer_stop(struct cam_icp_hw_mgr *hw_mgr)
+{
+ if (!hw_mgr->bps_ctxt_cnt &&
+ hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog) {
+ hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog_reset_counter = 0;
+ crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
+ hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog = NULL;
+ }
+
+ if (!hw_mgr->ipe_ctxt_cnt &&
+ hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog) {
+ hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog_reset_counter = 0;
+ crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
+ hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog = NULL;
+ }
+}
+
+static uint32_t cam_icp_mgr_calc_base_clk(uint32_t frame_cycles,
+ uint64_t budget)
+{
+ uint64_t base_clk;
+ uint64_t mul = 1000000000;
+
+ base_clk = (frame_cycles * mul) / budget;
+
+ CAM_DBG(CAM_ICP, "budget = %lld fc = %d ib = %lld base_clk = %lld",
+ budget, frame_cycles,
+ (long long int)(frame_cycles * mul), base_clk);
+
+ return base_clk;
+}
+
+static bool cam_icp_busy_prev_reqs(struct hfi_frame_process_info *frm_process,
+ uint64_t req_id)
+{
+ int i;
+ int cnt;
+
+ for (i = 0, cnt = 0; i < CAM_FRAME_CMD_MAX; i++) {
+ if (frm_process->request_id[i]) {
+ if (frm_process->fw_process_flag[i]) {
+ CAM_DBG(CAM_ICP, "r id = %lld busy = %d",
+ frm_process->request_id[i],
+ frm_process->fw_process_flag[i]);
+ cnt++;
+ }
+ }
+ }
+ if (cnt > 1)
+ return true;
+
+ return false;
+}
+
+static int cam_icp_calc_total_clk(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_clk_info *hw_mgr_clk_info, uint32_t dev_type)
+{
+ int i;
+ struct cam_icp_hw_ctx_data *ctx_data;
+
+ hw_mgr_clk_info->base_clk = 0;
+ for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+ ctx_data = &hw_mgr->ctx_data[i];
+ if (ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED &&
+ ICP_DEV_TYPE_TO_CLK_TYPE(
+ ctx_data->icp_dev_acquire_info->dev_type) ==
+ ICP_DEV_TYPE_TO_CLK_TYPE(dev_type))
+ hw_mgr_clk_info->base_clk +=
+ ctx_data->clk_info.base_clk;
+ }
+
+ return 0;
+}
+
+static bool cam_icp_update_clk_busy(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_icp_clk_info *hw_mgr_clk_info,
+ struct cam_icp_clk_bw_request *clk_info,
+ uint32_t base_clk)
+{
+ uint32_t next_clk_level;
+ uint32_t actual_clk;
+ bool rc = false;
+
+ /* 1. if current request frame cycles(fc) are more than previous
+ * frame fc
+ * Calculate the new base clock.
+ * if sum of base clocks are more than next available clk level
+ * Update clock rate, change curr_clk_rate to sum of base clock
+ * rates and make over_clked to zero
+ * else
+ * Update clock rate to next level, update curr_clk_rate and make
+ * overclked cnt to zero
+ * 2. if current fc is less than or equal to previous frame fc
+ * Still Bump up the clock to next available level
+ * if it is available, then update clock, make overclk cnt to
+ * zero. If the clock is already at highest clock rate then
+ * no need to update the clock
+ */
+ ctx_data->clk_info.base_clk = base_clk;
+ hw_mgr_clk_info->over_clked = 0;
+ if (clk_info->frame_cycles > ctx_data->clk_info.curr_fc) {
+ cam_icp_calc_total_clk(hw_mgr, hw_mgr_clk_info,
+ ctx_data->icp_dev_acquire_info->dev_type);
+ actual_clk = cam_icp_get_actual_clk_rate(hw_mgr,
+ ctx_data, base_clk);
+ if (hw_mgr_clk_info->base_clk > actual_clk) {
+ hw_mgr_clk_info->curr_clk = hw_mgr_clk_info->base_clk;
+ } else {
+ next_clk_level = cam_icp_get_next_clk_rate(hw_mgr,
+ ctx_data, hw_mgr_clk_info->curr_clk);
+ hw_mgr_clk_info->curr_clk = next_clk_level;
+ }
+ rc = true;
+ } else {
+ next_clk_level =
+ cam_icp_get_next_clk_rate(hw_mgr, ctx_data,
+ hw_mgr_clk_info->curr_clk);
+ if (hw_mgr_clk_info->curr_clk < next_clk_level) {
+ hw_mgr_clk_info->curr_clk = next_clk_level;
+ rc = true;
+ }
+ }
+ ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
+
+ return rc;
+}
+
+static bool cam_icp_update_clk_overclk_free(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_icp_clk_info *hw_mgr_clk_info,
+ struct cam_icp_clk_bw_request *clk_info,
+ uint32_t base_clk)
+{
+ int rc = false;
+
+ /*
+ * In caseof no pending packets case
+ * 1. In caseof overclk cnt is less than threshold, increase
+ * overclk count and no update in the clock rate
+ * 2. In caseof overclk cnt is greater than or equal to threshold
+ * then lower clock rate by one level and update hw_mgr current
+ * clock value.
+ * a. In case of new clock rate greater than sum of clock
+ * rates, reset overclk count value to zero if it is
+ * overclock
+ * b. if it is less than sum of base clocks then go to next
+ * level of clock and make overclk count to zero
+ * c. if it is same as sum of base clock rates update overclock
+ * cnt to 0
+ */
+ if (hw_mgr_clk_info->over_clked < hw_mgr_clk_info->threshold) {
+ hw_mgr_clk_info->over_clked++;
+ rc = false;
+ } else {
+ hw_mgr_clk_info->curr_clk =
+ cam_icp_get_lower_clk_rate(hw_mgr, ctx_data,
+ hw_mgr_clk_info->curr_clk);
+ if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) {
+ if (cam_icp_is_over_clk(hw_mgr, ctx_data,
+ hw_mgr_clk_info))
+ hw_mgr_clk_info->over_clked = 0;
+ } else if (hw_mgr_clk_info->curr_clk <
+ hw_mgr_clk_info->base_clk) {
+ hw_mgr_clk_info->curr_clk =
+ cam_icp_get_next_clk_rate(hw_mgr, ctx_data,
+ hw_mgr_clk_info->curr_clk);
+ hw_mgr_clk_info->over_clked = 0;
+ } else if (hw_mgr_clk_info->curr_clk ==
+ hw_mgr_clk_info->base_clk) {
+ hw_mgr_clk_info->over_clked = 0;
+ }
+ rc = true;
+ }
+
+ return rc;
+}
+
+static bool cam_icp_update_clk_free(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_icp_clk_info *hw_mgr_clk_info,
+ struct cam_icp_clk_bw_request *clk_info,
+ uint32_t base_clk)
+{
+ int rc = false;
+ bool over_clocked = false;
+
+ ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
+ ctx_data->clk_info.base_clk = base_clk;
+ cam_icp_calc_total_clk(hw_mgr, hw_mgr_clk_info,
+ ctx_data->icp_dev_acquire_info->dev_type);
+
+ /*
+ * Current clock is not always sum of base clocks, due to
+ * clock scales update to next higher or lower levels, it
+ * equals to one of discrete clock values supported by hardware.
+ * So even current clock is higher than sum of base clocks, we
+ * can not consider it is over clocked. if it is greater than
+ * discrete clock level then only it is considered as over clock.
+ * 1. Handle over clock case
+ * 2. If current clock is less than sum of base clocks
+ * update current clock
+ * 3. If current clock is same as sum of base clocks no action
+ */
+
+ over_clocked = cam_icp_is_over_clk(hw_mgr, ctx_data,
+ hw_mgr_clk_info);
+
+ if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk &&
+ over_clocked) {
+ rc = cam_icp_update_clk_overclk_free(hw_mgr, ctx_data,
+ hw_mgr_clk_info, clk_info, base_clk);
+ } else if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) {
+ hw_mgr_clk_info->over_clked = 0;
+ rc = false;
+ } else if (hw_mgr_clk_info->curr_clk < hw_mgr_clk_info->base_clk) {
+ hw_mgr_clk_info->curr_clk = cam_icp_get_actual_clk_rate(hw_mgr,
+ ctx_data, hw_mgr_clk_info->base_clk);
+ rc = true;
+ }
+
+ return rc;
+}
+
+static bool cam_icp_debug_clk_update(struct cam_icp_clk_info *hw_mgr_clk_info)
+{
+ if (icp_hw_mgr.icp_debug_clk < ICP_CLK_TURBO_HZ &&
+ icp_hw_mgr.icp_debug_clk &&
+ icp_hw_mgr.icp_debug_clk != hw_mgr_clk_info->curr_clk) {
+ hw_mgr_clk_info->base_clk = icp_hw_mgr.icp_debug_clk;
+ hw_mgr_clk_info->curr_clk = icp_hw_mgr.icp_debug_clk;
+ hw_mgr_clk_info->uncompressed_bw = icp_hw_mgr.icp_debug_clk;
+ hw_mgr_clk_info->compressed_bw = icp_hw_mgr.icp_debug_clk;
+ CAM_DBG(CAM_ICP, "bc = %d cc = %d",
+ hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk);
+ return true;
+ }
+
+ return false;
+}
+
+static bool cam_icp_default_clk_update(struct cam_icp_clk_info *hw_mgr_clk_info)
+{
+ if (icp_hw_mgr.icp_default_clk != hw_mgr_clk_info->curr_clk) {
+ hw_mgr_clk_info->base_clk = icp_hw_mgr.icp_default_clk;
+ hw_mgr_clk_info->curr_clk = icp_hw_mgr.icp_default_clk;
+ hw_mgr_clk_info->uncompressed_bw = icp_hw_mgr.icp_default_clk;
+ hw_mgr_clk_info->compressed_bw = icp_hw_mgr.icp_default_clk;
+ CAM_DBG(CAM_ICP, "bc = %d cc = %d",
+ hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Recompute the aggregate bandwidth vote for the hardware (IPE or BPS) that
+ * ctx_data runs on, after this context's per-frame bw request changed.
+ * Returns true if hw_mgr_clk_info was recomputed and a cpas re-vote is
+ * required; false if the request is unchanged or a busy device would have
+ * its bandwidth lowered (lowering is deferred while previous requests are
+ * still in flight).
+ */
+static bool cam_icp_update_bw(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_icp_clk_info *hw_mgr_clk_info,
+ struct cam_icp_clk_bw_request *clk_info,
+ bool busy)
+{
+ int i;
+ struct cam_icp_hw_ctx_data *ctx;
+
+ /*
+ * If current request bandwidth is different from previous frames, then
+ * recalculate bandwidth of all contexts of same hardware and update
+ * voting of bandwidth
+ */
+ CAM_DBG(CAM_ICP, "ubw ctx = %lld clk_info ubw = %lld busy = %d",
+ ctx_data->clk_info.uncompressed_bw,
+ clk_info->uncompressed_bw, busy);
+
+ /* No change vs. this ctx AND vs. the aggregate: nothing to do. */
+ if ((clk_info->uncompressed_bw == ctx_data->clk_info.uncompressed_bw) &&
+ (ctx_data->clk_info.uncompressed_bw ==
+ hw_mgr_clk_info->uncompressed_bw))
+ return false;
+
+ /* Don't shrink bw while earlier frames are still being processed. */
+ if (busy &&
+ ctx_data->clk_info.uncompressed_bw > clk_info->uncompressed_bw)
+ return false;
+
+ ctx_data->clk_info.uncompressed_bw = clk_info->uncompressed_bw;
+ ctx_data->clk_info.compressed_bw = clk_info->compressed_bw;
+ hw_mgr_clk_info->uncompressed_bw = 0;
+ hw_mgr_clk_info->compressed_bw = 0;
+ /* Sum bw over every acquired context sharing this clk domain. */
+ for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+ ctx = &hw_mgr->ctx_data[i];
+ if (ctx->state == CAM_ICP_CTX_STATE_ACQUIRED &&
+ ICP_DEV_TYPE_TO_CLK_TYPE(
+ ctx->icp_dev_acquire_info->dev_type) ==
+ ICP_DEV_TYPE_TO_CLK_TYPE(
+ ctx_data->icp_dev_acquire_info->dev_type)) {
+ hw_mgr_clk_info->uncompressed_bw +=
+ ctx->clk_info.uncompressed_bw;
+ hw_mgr_clk_info->compressed_bw +=
+ ctx->clk_info.compressed_bw;
+ CAM_DBG(CAM_ICP, "ubw = %lld, cbw = %lld",
+ hw_mgr_clk_info->uncompressed_bw,
+ hw_mgr_clk_info->compressed_bw);
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Decide whether the IPE/BPS clock must change for the frame at slot @idx of
+ * @ctx_data's frame-process queue. Resets the ctx and device watchdog timers,
+ * then picks, in priority order: the debugfs override, the default clock
+ * (when the frame has no cycle budget), or a busy/free recalculation based on
+ * the frame's base clock. Returns true if the caller should reprogram the
+ * clock via cam_icp_update_clk_rate().
+ */
+static bool cam_icp_check_clk_update(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data, int idx)
+{
+ bool busy, rc = false;
+ uint32_t base_clk;
+ struct cam_icp_clk_bw_request *clk_info;
+ struct hfi_frame_process_info *frame_info;
+ uint64_t req_id;
+ struct cam_icp_clk_info *hw_mgr_clk_info;
+
+ cam_icp_ctx_timer_reset(ctx_data);
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+ cam_icp_device_timer_reset(hw_mgr, ICP_CLK_HW_BPS);
+ hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+ CAM_DBG(CAM_ICP, "Reset bps timer");
+ } else {
+ cam_icp_device_timer_reset(hw_mgr, ICP_CLK_HW_IPE);
+ hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+ CAM_DBG(CAM_ICP, "Reset ipe timer");
+ }
+
+ /* debugfs override wins over all per-frame calculations */
+ if (icp_hw_mgr.icp_debug_clk)
+ return cam_icp_debug_clk_update(hw_mgr_clk_info);
+
+ /* Check is there any pending frames in this context */
+ frame_info = &ctx_data->hfi_frame_process;
+ req_id = frame_info->request_id[idx];
+ busy = cam_icp_busy_prev_reqs(frame_info, req_id);
+ CAM_DBG(CAM_ICP, "busy = %d req_id = %lld", busy, req_id);
+
+ clk_info = &ctx_data->hfi_frame_process.clk_info[idx];
+ if (!clk_info->frame_cycles)
+ return cam_icp_default_clk_update(hw_mgr_clk_info);
+
+ /* Calculate base clk rate */
+ base_clk = cam_icp_mgr_calc_base_clk(
+ clk_info->frame_cycles, clk_info->budget_ns);
+ ctx_data->clk_info.rt_flag = clk_info->rt_flag;
+
+ if (busy)
+ rc = cam_icp_update_clk_busy(hw_mgr, ctx_data,
+ hw_mgr_clk_info, clk_info, base_clk);
+ else
+ rc = cam_icp_update_clk_free(hw_mgr, ctx_data,
+ hw_mgr_clk_info, clk_info, base_clk);
+
+ CAM_DBG(CAM_ICP, "bc = %d cc = %d busy = %d overclk = %d uc = %d",
+ hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk,
+ busy, hw_mgr_clk_info->over_clked, rc);
+
+ return rc;
+}
+
+/*
+ * Decide whether the axi bandwidth vote must change for the frame at slot
+ * @idx. Thin wrapper that selects the per-HW clk_info (BPS vs IPE), derives
+ * the busy state from pending requests, and delegates to cam_icp_update_bw().
+ * Returns true if the caller should re-vote via cam_icp_update_cpas_vote().
+ */
+static bool cam_icp_check_bw_update(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data, int idx)
+{
+ bool busy, rc = false;
+ struct cam_icp_clk_bw_request *clk_info;
+ struct cam_icp_clk_info *hw_mgr_clk_info;
+ struct hfi_frame_process_info *frame_info;
+ uint64_t req_id;
+
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+ else
+ hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+
+ clk_info = &ctx_data->hfi_frame_process.clk_info[idx];
+ frame_info = &ctx_data->hfi_frame_process;
+ req_id = frame_info->request_id[idx];
+ busy = cam_icp_busy_prev_reqs(frame_info, req_id);
+ rc = cam_icp_update_bw(hw_mgr, ctx_data, hw_mgr_clk_info,
+ clk_info, busy);
+
+ CAM_DBG(CAM_ICP, "ubw = %lld, cbw = %lld, update_bw = %d",
+ hw_mgr_clk_info->uncompressed_bw,
+ hw_mgr_clk_info->compressed_bw, rc);
+
+ return rc;
+}
+
+/*
+ * Push the cached curr_clk for ctx_data's device down to the hardware layer
+ * via process_cmd(CAM_ICP_{BPS,IPE}_CMD_UPDATE_CLK). For IPE the command is
+ * mirrored to the second IPE core when present. Returns 0 on success or
+ * -EINVAL if the device interfaces are not populated; the process_cmd return
+ * value itself is not checked here.
+ */
+static int cam_icp_update_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ uint32_t id;
+ uint32_t curr_clk_rate;
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+ struct cam_hw_intf *dev_intf = NULL;
+ struct cam_a5_clk_update_cmd clk_upd_cmd;
+
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+
+
+ if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+ return -EINVAL;
+ }
+
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+ dev_intf = bps_dev_intf;
+ curr_clk_rate = hw_mgr->clk_info[ICP_CLK_HW_BPS].curr_clk;
+ id = CAM_ICP_BPS_CMD_UPDATE_CLK;
+ } else {
+ dev_intf = ipe0_dev_intf;
+ curr_clk_rate = hw_mgr->clk_info[ICP_CLK_HW_IPE].curr_clk;
+ id = CAM_ICP_IPE_CMD_UPDATE_CLK;
+ }
+
+ CAM_DBG(CAM_PERF, "clk_rate %u for dev_type %d", curr_clk_rate,
+ ctx_data->icp_dev_acquire_info->dev_type);
+ clk_upd_cmd.curr_clk_rate = curr_clk_rate;
+ clk_upd_cmd.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag;
+
+ dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+ &clk_upd_cmd, sizeof(struct cam_a5_clk_update_cmd));
+
+ /* Keep IPE1 clocked in lock-step with IPE0 when dual-IPE is present. */
+ if (ctx_data->icp_dev_acquire_info->dev_type != CAM_ICP_RES_TYPE_BPS)
+ if (ipe1_dev_intf)
+ ipe1_dev_intf->hw_ops.process_cmd(
+ ipe1_dev_intf->hw_priv, id,
+ &clk_upd_cmd,
+ sizeof(struct cam_a5_clk_update_cmd));
+
+ return 0;
+}
+
+/*
+ * Submit the aggregated axi bandwidth vote for ctx_data's device to cpas
+ * via process_cmd(CAM_ICP_{BPS,IPE}_CMD_VOTE_CPAS). The ahb portion of the
+ * vote is intentionally marked invalid (bandwidth-only vote). Returns 0 on
+ * success or -EINVAL if the device interfaces are missing.
+ */
+static int cam_icp_update_cpas_vote(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ uint32_t id;
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+ struct cam_hw_intf *dev_intf = NULL;
+ struct cam_icp_clk_info *clk_info;
+ struct cam_icp_cpas_vote clk_update;
+
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+
+ if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+ return -EINVAL;
+ }
+
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+ dev_intf = bps_dev_intf;
+ clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+ id = CAM_ICP_BPS_CMD_VOTE_CPAS;
+ } else {
+ dev_intf = ipe0_dev_intf;
+ clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+ id = CAM_ICP_IPE_CMD_VOTE_CPAS;
+ }
+
+ /* ahb vote disabled: only the axi (bandwidth) vote is meaningful here */
+ clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
+ clk_update.ahb_vote.vote.freq = 0;
+ clk_update.ahb_vote_valid = false;
+ clk_update.axi_vote.compressed_bw = clk_info->compressed_bw;
+ clk_update.axi_vote.uncompressed_bw = clk_info->uncompressed_bw;
+ clk_update.axi_vote_valid = true;
+ dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+ &clk_update, sizeof(clk_update));
+
+ /*
+ * Consolidated bw needs to be voted on only one IPE client. Otherwise
+ * total bw that we vote at bus client would be doubled. So either
+ * remove voting on IPE1 or divide the vote for each IPE client
+ * and vote to cpas - cpas will add up and vote full bw to sf client
+ * anyway.
+ */
+
+ CAM_DBG(CAM_ICP, "compress_bw %llu uncompress_bw %llu dev_type %d",
+ clk_info->compressed_bw, clk_info->uncompressed_bw,
+ ctx_data->icp_dev_acquire_info->dev_type);
+
+ return 0;
+}
+
+/*
+ * Per-frame entry point for power management: re-evaluates clock rate and
+ * bandwidth vote for the frame at slot @idx and applies whichever changed.
+ * Error codes from the two update paths are OR-combined, so the exact errno
+ * is not preserved when both run - callers appear to treat this as
+ * success/failure only.
+ */
+static int cam_icp_mgr_ipe_bps_clk_update(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data, int idx)
+{
+ int rc = 0;
+
+ if (cam_icp_check_clk_update(hw_mgr, ctx_data, idx))
+ rc = cam_icp_update_clk_rate(hw_mgr, ctx_data);
+
+ if (cam_icp_check_bw_update(hw_mgr, ctx_data, idx))
+ rc |= cam_icp_update_cpas_vote(hw_mgr, ctx_data);
+
+ return rc;
+}
+
+/*
+ * Power up / resume the device (BPS or IPE0[+IPE1]) backing @ctx_data.
+ * Reference-counted per device: only the first context on a device performs
+ * the init + power-resume sequence; later contexts just bump the count and
+ * return. When ipe_bps_pc_flag is set, firmware-side power collapse is also
+ * (re)enabled for the resumed cores via hfi_enable_ipe_bps_pc().
+ * Returns 0 on success, -EINVAL if device interfaces are missing.
+ */
+static int cam_icp_mgr_ipe_bps_resume(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+ uint32_t core_info_mask = 0;
+ int rc = 0;
+
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+
+ if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+ return -EINVAL;
+ }
+
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+ /* post-increment: skip hw init unless we are the first ctx */
+ if (hw_mgr->bps_ctxt_cnt++)
+ goto end;
+ if (!hw_mgr->bps_clk_state) {
+ bps_dev_intf->hw_ops.init(
+ bps_dev_intf->hw_priv, NULL, 0);
+ hw_mgr->bps_clk_state = true;
+ }
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
+ bps_dev_intf->hw_ops.process_cmd(
+ bps_dev_intf->hw_priv,
+ CAM_ICP_BPS_CMD_POWER_RESUME, NULL, 0);
+ }
+ core_info_mask = ICP_PWR_CLP_BPS;
+ } else {
+ if (hw_mgr->ipe_ctxt_cnt++)
+ goto end;
+ if (!hw_mgr->ipe_clk_state)
+ ipe0_dev_intf->hw_ops.init(
+ ipe0_dev_intf->hw_priv, NULL, 0);
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
+ ipe0_dev_intf->hw_ops.process_cmd(
+ ipe0_dev_intf->hw_priv,
+ CAM_ICP_IPE_CMD_POWER_RESUME, NULL, 0);
+ }
+
+ /* Second IPE core follows the same init/resume sequence. */
+ if ((icp_hw_mgr.ipe1_enable) &&
+ (ipe1_dev_intf) &&
+ (!hw_mgr->ipe_clk_state)) {
+ ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
+ NULL, 0);
+
+ if (icp_hw_mgr.ipe_bps_pc_flag) {
+ ipe1_dev_intf->hw_ops.process_cmd(
+ ipe1_dev_intf->hw_priv,
+ CAM_ICP_IPE_CMD_POWER_RESUME,
+ NULL, 0);
+ }
+ }
+ hw_mgr->ipe_clk_state = true;
+
+ if ((icp_hw_mgr.ipe1_enable) &&
+ (ipe1_dev_intf))
+ core_info_mask = (ICP_PWR_CLP_IPE0 |
+ ICP_PWR_CLP_IPE1);
+ else
+ core_info_mask = ICP_PWR_CLP_IPE0;
+ }
+
+ CAM_DBG(CAM_ICP, "core_info %X", core_info_mask);
+ if (icp_hw_mgr.ipe_bps_pc_flag)
+ rc = hfi_enable_ipe_bps_pc(true, core_info_mask);
+ else
+ rc = hfi_enable_ipe_bps_pc(false, core_info_mask);
+end:
+ return rc;
+}
+
+/*
+ * Power down the device (BPS or IPE0/IPE1) when its last context goes away.
+ * @ctx_data may be NULL for a forced collapse; in that case @dev_type selects
+ * the device and the context refcount is NOT decremented. The firmware
+ * power-collapse command is skipped while recovery is in progress (hw is
+ * presumed unresponsive). Returns the last process_cmd rc, 0 otherwise.
+ */
+static int cam_icp_mgr_ipe_bps_power_collapse(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data, int dev_type)
+{
+ int rc = 0, dev;
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+
+ if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+ return -EINVAL;
+ }
+
+ if (!ctx_data)
+ dev = dev_type;
+ else
+ dev = ctx_data->icp_dev_acquire_info->dev_type;
+
+ if (dev == CAM_ICP_RES_TYPE_BPS) {
+ CAM_DBG(CAM_ICP, "bps ctx cnt %d", hw_mgr->bps_ctxt_cnt);
+ if (ctx_data)
+ --hw_mgr->bps_ctxt_cnt;
+
+ /* other contexts still alive on BPS: keep it powered */
+ if (hw_mgr->bps_ctxt_cnt)
+ goto end;
+
+ if (icp_hw_mgr.ipe_bps_pc_flag &&
+ !atomic_read(&hw_mgr->recovery)) {
+ rc = bps_dev_intf->hw_ops.process_cmd(
+ bps_dev_intf->hw_priv,
+ CAM_ICP_BPS_CMD_POWER_COLLAPSE,
+ NULL, 0);
+ }
+
+ if (hw_mgr->bps_clk_state) {
+ bps_dev_intf->hw_ops.deinit
+ (bps_dev_intf->hw_priv, NULL, 0);
+ hw_mgr->bps_clk_state = false;
+ }
+ } else {
+ CAM_DBG(CAM_ICP, "ipe ctx cnt %d", hw_mgr->ipe_ctxt_cnt);
+ if (ctx_data)
+ --hw_mgr->ipe_ctxt_cnt;
+
+ if (hw_mgr->ipe_ctxt_cnt)
+ goto end;
+
+ if (icp_hw_mgr.ipe_bps_pc_flag &&
+ !atomic_read(&hw_mgr->recovery)) {
+ rc = ipe0_dev_intf->hw_ops.process_cmd(
+ ipe0_dev_intf->hw_priv,
+ CAM_ICP_IPE_CMD_POWER_COLLAPSE, NULL, 0);
+ }
+
+ if (hw_mgr->ipe_clk_state)
+ ipe0_dev_intf->hw_ops.deinit(
+ ipe0_dev_intf->hw_priv, NULL, 0);
+
+ /* Mirror collapse/deinit on the second IPE core when present. */
+ if (ipe1_dev_intf) {
+ if (icp_hw_mgr.ipe_bps_pc_flag &&
+ !atomic_read(&hw_mgr->recovery)) {
+ rc = ipe1_dev_intf->hw_ops.process_cmd(
+ ipe1_dev_intf->hw_priv,
+ CAM_ICP_IPE_CMD_POWER_COLLAPSE,
+ NULL, 0);
+ }
+
+ if (hw_mgr->ipe_clk_state)
+ ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
+ NULL, 0);
+ }
+
+ hw_mgr->ipe_clk_state = false;
+ }
+
+end:
+ return rc;
+}
+
+/* debugfs setter: override ICP clock with a fixed debug value (0 = off). */
+static int cam_icp_set_dbg_default_clk(void *data, u64 val)
+{
+ icp_hw_mgr.icp_debug_clk = val;
+ return 0;
+}
+
+/* debugfs getter for the debug clock override. */
+static int cam_icp_get_dbg_default_clk(void *data, u64 *val)
+{
+ *val = icp_hw_mgr.icp_debug_clk;
+ return 0;
+}
+
+/* Backs the "icp_debug_clk" debugfs file created below. */
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_default_clk,
+ cam_icp_get_dbg_default_clk,
+ cam_icp_set_dbg_default_clk, "%16llu");
+
+/* debugfs setter for the A5 firmware debug level (stored unchecked). */
+static int cam_icp_set_a5_dbg_lvl(void *data, u64 val)
+{
+ icp_hw_mgr.a5_dbg_lvl = val;
+ return 0;
+}
+
+/* debugfs getter for the A5 firmware debug level. */
+static int cam_icp_get_a5_dbg_lvl(void *data, u64 *val)
+{
+ *val = icp_hw_mgr.a5_dbg_lvl;
+ return 0;
+}
+
+/* Backs the "a5_debug_lvl" debugfs file created below. */
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_fs, cam_icp_get_a5_dbg_lvl,
+ cam_icp_set_a5_dbg_lvl, "%08llu");
+
+/*
+ * debugfs setter for the A5 debug transport type (e.g. queue mode).
+ * NOTE(review): bound is inclusive (<= NUM_HFI_DEBUG_MODE) while the
+ * fw_dump sibling below uses a strict (<) bound - confirm whether
+ * NUM_HFI_DEBUG_MODE is itself a valid mode or an off-by-one.
+ * Out-of-range values are silently ignored.
+ */
+static int cam_icp_set_a5_dbg_type(void *data, u64 val)
+{
+ if (val <= NUM_HFI_DEBUG_MODE)
+ icp_hw_mgr.a5_debug_type = val;
+ return 0;
+}
+
+/* debugfs getter for the A5 debug transport type. */
+static int cam_icp_get_a5_dbg_type(void *data, u64 *val)
+{
+ *val = icp_hw_mgr.a5_debug_type;
+ return 0;
+}
+
+
+/* Backs the "a5_debug_type" debugfs file created below. */
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_type_fs, cam_icp_get_a5_dbg_type,
+ cam_icp_set_a5_dbg_type, "%08llu");
+
+/* debugfs setter for the A5 fw dump level; out-of-range values ignored. */
+static int cam_icp_set_a5_fw_dump_lvl(void *data, u64 val)
+{
+ if (val < NUM_HFI_DUMP_LVL)
+ icp_hw_mgr.a5_fw_dump_lvl = val;
+ return 0;
+}
+
+/* debugfs getter for the A5 fw dump level. */
+static int cam_icp_get_a5_fw_dump_lvl(void *data, u64 *val)
+{
+ *val = icp_hw_mgr.a5_fw_dump_lvl;
+ return 0;
+}
+
+/* Backs the "a5_fw_dump_lvl" debugfs file created below. */
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_fw_dump, cam_icp_get_a5_fw_dump_lvl,
+ cam_icp_set_a5_fw_dump_lvl, "%08llu");
+
+/*
+ * Create the "camera_icp" debugfs directory and its control files
+ * (icp_pc, ipe_bps_pc, icp_debug_clk, a5_jtag_debug, a5_debug_type,
+ * a5_debug_lvl, a5_fw_dump_lvl). On any failure the whole directory is
+ * torn down and -ENOMEM returned, leaving icp_hw_mgr.dentry NULL.
+ */
+static int cam_icp_hw_mgr_create_debugfs_entry(void)
+{
+ int rc = 0;
+
+ icp_hw_mgr.dentry = debugfs_create_dir("camera_icp", NULL);
+ if (!icp_hw_mgr.dentry)
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("icp_pc",
+ 0644,
+ icp_hw_mgr.dentry,
+ &icp_hw_mgr.icp_pc_flag)) {
+ CAM_ERR(CAM_ICP, "failed to create icp_pc entry");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ if (!debugfs_create_bool("ipe_bps_pc",
+ 0644,
+ icp_hw_mgr.dentry,
+ &icp_hw_mgr.ipe_bps_pc_flag)) {
+ CAM_ERR(CAM_ICP, "failed to create ipe_bps_pc entry");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ if (!debugfs_create_file("icp_debug_clk",
+ 0644,
+ icp_hw_mgr.dentry, NULL,
+ &cam_icp_debug_default_clk)) {
+ CAM_ERR(CAM_ICP, "failed to create icp_debug_clk entry");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ if (!debugfs_create_bool("a5_jtag_debug",
+ 0644,
+ icp_hw_mgr.dentry,
+ &icp_hw_mgr.a5_jtag_debug)) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ if (!debugfs_create_file("a5_debug_type",
+ 0644,
+ icp_hw_mgr.dentry,
+ NULL, &cam_icp_debug_type_fs)) {
+ CAM_ERR(CAM_ICP, "failed to create a5_debug_type");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ if (!debugfs_create_file("a5_debug_lvl",
+ 0644,
+ icp_hw_mgr.dentry,
+ NULL, &cam_icp_debug_fs)) {
+ CAM_ERR(CAM_ICP, "failed to create a5_dbg_lvl");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ if (!debugfs_create_file("a5_fw_dump_lvl",
+ 0644,
+ icp_hw_mgr.dentry,
+ NULL, &cam_icp_debug_fw_dump)) {
+ CAM_ERR(CAM_ICP, "failed to create a5_fw_dump_lvl");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ return rc;
+err:
+ /* remove the directory and everything created so far */
+ debugfs_remove_recursive(icp_hw_mgr.dentry);
+ icp_hw_mgr.dentry = NULL;
+ return rc;
+}
+
+/*
+ * Workqueue callback for the command queue: unwraps the task payload and
+ * writes the prepared HFI command packet to firmware via hfi_write_cmd().
+ * @priv is the hw_mgr (unused beyond validation), @data the task payload.
+ */
+static int cam_icp_mgr_process_cmd(void *priv, void *data)
+{
+ int rc;
+ struct hfi_cmd_work_data *task_data = NULL;
+ struct cam_icp_hw_mgr *hw_mgr;
+
+ if (!data || !priv) {
+ CAM_ERR(CAM_ICP, "Invalid params%pK %pK", data, priv);
+ return -EINVAL;
+ }
+
+ hw_mgr = priv;
+ task_data = (struct hfi_cmd_work_data *)data;
+
+ rc = hfi_write_cmd(task_data->data);
+
+ return rc;
+}
+
+/*
+ * Flush all pending frames of a context: signal each outstanding request as
+ * failed (flag false) through ctxt_event_cb, destroy any merged input sync
+ * objects, and clear the frame-process bookkeeping (request id, fw flag,
+ * bitmap). A second pass releases sync objects parked in in_free_resource.
+ * Always returns 0.
+ */
+static int cam_icp_mgr_cleanup_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int i;
+ struct hfi_frame_process_info *hfi_frame_process;
+ struct cam_hw_done_event_data buf_data;
+
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+ if (!hfi_frame_process->request_id[i])
+ continue;
+ buf_data.request_id = hfi_frame_process->request_id[i];
+ /* report the request as failed to the context layer */
+ ctx_data->ctxt_event_cb(ctx_data->context_priv,
+ false, &buf_data);
+ hfi_frame_process->request_id[i] = 0;
+ if (ctx_data->hfi_frame_process.in_resource[i] > 0) {
+ CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+ ctx_data->hfi_frame_process.in_resource[i]);
+ cam_sync_destroy(
+ ctx_data->hfi_frame_process.in_resource[i]);
+ ctx_data->hfi_frame_process.in_resource[i] = 0;
+ }
+ hfi_frame_process->fw_process_flag[i] = false;
+ clear_bit(i, ctx_data->hfi_frame_process.bitmap);
+ }
+
+ for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+ if (!hfi_frame_process->in_free_resource[i])
+ continue;
+
+ CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+ ctx_data->hfi_frame_process.in_free_resource[i]);
+ cam_sync_destroy(
+ ctx_data->hfi_frame_process.in_free_resource[i]);
+ ctx_data->hfi_frame_process.in_free_resource[i] = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Complete one frame on receipt of its async ack. Recovers the context from
+ * user_data1 of the ack, locates the matching request id in the ctx's
+ * frame-process table, notifies the context layer with @flag
+ * (ICP_FRAME_PROCESS_SUCCESS/FAILURE), destroys the merged input sync
+ * object, and frees the table slot. Resets the ctx and device watchdog
+ * timers as a side effect. Returns 0, or -EINVAL if the context/request
+ * cannot be found; silently succeeds if the ctx is no longer ACQUIRED.
+ */
+static int cam_icp_mgr_handle_frame_process(uint32_t *msg_ptr, int flag)
+{
+ int i;
+ uint32_t idx;
+ uint64_t request_id;
+ struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+ struct hfi_frame_process_info *hfi_frame_process;
+ struct cam_hw_done_event_data buf_data;
+ uint32_t clk_type;
+
+ ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+ request_id = ioconfig_ack->user_data2;
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ioconfig_ack->user_data1);
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "Invalid Context req %llu", request_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx_data->ctx_mutex);
+ cam_icp_ctx_timer_reset(ctx_data);
+ if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+ /* late ack for a released/flushed ctx: drop it quietly */
+ CAM_DBG(CAM_ICP, "ctx %u is in %d state",
+ ctx_data->ctx_id, ctx_data->state);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return 0;
+ }
+
+ CAM_DBG(CAM_REQ,
+ "ctx_id : %u, request_id :%lld dev_type: %d",
+ ctx_data->ctx_id, request_id,
+ ctx_data->icp_dev_acquire_info->dev_type);
+
+ clk_type = ICP_DEV_TYPE_TO_CLK_TYPE(
+ ctx_data->icp_dev_acquire_info->dev_type);
+ cam_icp_device_timer_reset(&icp_hw_mgr, clk_type);
+
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+ if (hfi_frame_process->request_id[i] == request_id)
+ break;
+
+ if (i >= CAM_FRAME_CMD_MAX) {
+ CAM_ERR(CAM_ICP, "pkt not found in ctx data for req_id =%lld",
+ request_id);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return -EINVAL;
+ }
+ idx = i;
+
+ if (flag == ICP_FRAME_PROCESS_FAILURE)
+ CAM_ERR(CAM_ICP, "Done with error: ctx_id %d req %llu dev %d",
+ ctx_data->ctx_id, request_id,
+ ctx_data->icp_dev_acquire_info->dev_type);
+
+ buf_data.request_id = hfi_frame_process->request_id[idx];
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, flag, &buf_data);
+ hfi_frame_process->request_id[idx] = 0;
+ if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+ CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+ ctx_data->hfi_frame_process.in_resource[idx]);
+ cam_sync_destroy(ctx_data->hfi_frame_process.in_resource[idx]);
+ ctx_data->hfi_frame_process.in_resource[idx] = 0;
+ }
+ clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ hfi_frame_process->fw_process_flag[idx] = false;
+ mutex_unlock(&ctx_data->ctx_mutex);
+
+ return 0;
+}
+
+/*
+ * Translate a frame-process async ack into a success/failure completion.
+ * Any error at the ack level (err_type), a missing payload, or a non-zero
+ * firmware result is reported as ICP_FRAME_PROCESS_FAILURE so the request
+ * is still completed and its slot freed.
+ */
+static int cam_icp_mgr_process_msg_frame_process(uint32_t *msg_ptr)
+{
+ struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+ struct hfi_msg_frame_process_done *frame_done;
+
+ if (!msg_ptr) {
+ CAM_ERR(CAM_ICP, "msg ptr is NULL");
+ return -EINVAL;
+ }
+
+ ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+ if (ioconfig_ack->err_type != HFI_ERR_SYS_NONE) {
+ CAM_ERR(CAM_ICP, "failed with error : %u",
+ ioconfig_ack->err_type);
+ cam_icp_mgr_handle_frame_process(msg_ptr,
+ ICP_FRAME_PROCESS_FAILURE);
+ return -EIO;
+ }
+
+ frame_done =
+ (struct hfi_msg_frame_process_done *)ioconfig_ack->msg_data;
+ if (!frame_done) {
+ cam_icp_mgr_handle_frame_process(msg_ptr,
+ ICP_FRAME_PROCESS_FAILURE);
+ return -EINVAL;
+ }
+
+ if (frame_done->result)
+ return cam_icp_mgr_handle_frame_process(msg_ptr,
+ ICP_FRAME_PROCESS_FAILURE);
+ else
+ return cam_icp_mgr_handle_frame_process(msg_ptr,
+ ICP_FRAME_PROCESS_SUCCESS);
+}
+
+/*
+ * Handle the CONFIG_IO ack for IPE or BPS. Validates the embedded result
+ * code, recovers the waiting context from user_data1, records the IPE
+ * scratch-memory size, and wakes the caller blocked on wait_complete.
+ * Returns 0 on success, -EIO on a firmware-reported error, -EINVAL on a
+ * bad/missing context.
+ */
+static int cam_icp_mgr_process_msg_config_io(uint32_t *msg_ptr)
+{
+ struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+ struct hfi_msg_ipe_config *ipe_config_ack = NULL;
+ struct hfi_msg_bps_common *bps_config_ack = NULL;
+
+ if (!msg_ptr) {
+ CAM_ERR(CAM_ICP, "msg ptr is NULL");
+ return -EINVAL;
+ }
+
+ ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+
+ if (ioconfig_ack->opcode == HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO) {
+ ipe_config_ack =
+ (struct hfi_msg_ipe_config *)(ioconfig_ack->msg_data);
+ if (ipe_config_ack->rc) {
+ CAM_ERR(CAM_ICP, "rc = %d err = %u",
+ ipe_config_ack->rc, ioconfig_ack->err_type);
+ return -EIO;
+ }
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ioconfig_ack->user_data1);
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "wrong ctx data from IPE response");
+ return -EINVAL;
+ }
+ /* IPE reports how much scratch memory it needs per ctx */
+ ctx_data->scratch_mem_size = ipe_config_ack->scratch_mem_size;
+ } else {
+ bps_config_ack =
+ (struct hfi_msg_bps_common *)(ioconfig_ack->msg_data);
+ if (bps_config_ack->rc) {
+ CAM_ERR(CAM_ICP, "rc : %u, opcode :%u",
+ bps_config_ack->rc, ioconfig_ack->opcode);
+ return -EIO;
+ }
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ioconfig_ack->user_data1);
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "wrong ctx data from BPS response");
+ return -EINVAL;
+ }
+ }
+ complete(&ctx_data->wait_complete);
+
+ return 0;
+}
+
+/*
+ * Handle the CREATE_HANDLE ack: store the firmware handle on the waiting
+ * context and wake it. If the context has since left the IN_USE state the
+ * handle is discarded and -EPERM returned; the waiter is woken either way.
+ */
+static int cam_icp_mgr_process_msg_create_handle(uint32_t *msg_ptr)
+{
+ struct hfi_msg_create_handle_ack *create_handle_ack = NULL;
+ struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ int rc = 0;
+
+ create_handle_ack = (struct hfi_msg_create_handle_ack *)msg_ptr;
+ if (!create_handle_ack) {
+ CAM_ERR(CAM_ICP, "Invalid create_handle_ack");
+ return -EINVAL;
+ }
+
+ ctx_data =
+ (struct cam_icp_hw_ctx_data *)(uintptr_t)
+ create_handle_ack->user_data1;
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "Invalid ctx_data");
+ return -EINVAL;
+ }
+
+ if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE) {
+ ctx_data->fw_handle = create_handle_ack->fw_handle;
+ CAM_DBG(CAM_ICP, "fw_handle = %x", ctx_data->fw_handle);
+ } else {
+ CAM_WARN(CAM_ICP,
+ "This ctx is no longer in use current state: %d",
+ ctx_data->state);
+ ctx_data->fw_handle = 0;
+ rc = -EPERM;
+ }
+ complete(&ctx_data->wait_complete);
+ return rc;
+}
+
+/*
+ * Handle the SYS_PING ack: recover the pinging context from user_data and
+ * wake it, but only if it is still IN_USE (a released ctx's completion must
+ * not be signalled). Returns 0, or -EINVAL for a null message/context.
+ */
+static int cam_icp_mgr_process_msg_ping_ack(uint32_t *msg_ptr)
+{
+ struct hfi_msg_ping_ack *ping_ack = NULL;
+ struct cam_icp_hw_ctx_data *ctx_data = NULL;
+
+ ping_ack = (struct hfi_msg_ping_ack *)msg_ptr;
+ if (!ping_ack) {
+ CAM_ERR(CAM_ICP, "Empty ping ack message");
+ return -EINVAL;
+ }
+
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ping_ack->user_data);
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "Invalid ctx_data");
+ return -EINVAL;
+ }
+
+ if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)
+ complete(&ctx_data->wait_complete);
+
+ return 0;
+}
+
+/*
+ * Dispatch an ASYNC_COMMAND_INDIRECT ack by its embedded opcode:
+ * CONFIG_IO acks go to the config-io handler, FRAME_PROCESS acks to the
+ * frame completion handler. Unknown opcodes return -EINVAL.
+ */
+static int cam_icp_mgr_process_indirect_ack_msg(uint32_t *msg_ptr)
+{
+ int rc;
+
+ if (!msg_ptr) {
+ CAM_ERR(CAM_ICP, "msg ptr is NULL");
+ return -EINVAL;
+ }
+
+ switch (msg_ptr[ICP_PACKET_OPCODE]) {
+ case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
+ case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
+ CAM_DBG(CAM_ICP, "received IPE/BPS_CONFIG_IO:");
+ rc = cam_icp_mgr_process_msg_config_io(msg_ptr);
+ if (rc)
+ return rc;
+ break;
+
+ case HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS:
+ case HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS:
+ rc = cam_icp_mgr_process_msg_frame_process(msg_ptr);
+ if (rc)
+ return rc;
+ break;
+ default:
+ CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+ msg_ptr[ICP_PACKET_OPCODE]);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * Dispatch an ASYNC_COMMAND_DIRECT ack (ABORT / DESTROY / MEM_MAP /
+ * MEM_UNMAP) and wake the context blocked on wait_complete, gated on a
+ * per-opcode context-state check so stale acks don't signal freed contexts.
+ * NOTE(review): unlike the indirect path, ctx_data recovered from
+ * user_data1 is dereferenced without a NULL check here - confirm firmware
+ * guarantees user_data1 is always valid for these opcodes. Also note the
+ * MAP/UNMAP cases cast user_data1 directly instead of via U64_TO_PTR.
+ */
+static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
+{
+ struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ int rc = 0;
+
+ a5_dev_intf = icp_hw_mgr.a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+ return -EINVAL;
+ }
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+ switch (msg_ptr[ICP_PACKET_OPCODE]) {
+ case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
+ case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
+ ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ioconfig_ack->user_data1);
+ if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
+ complete(&ctx_data->wait_complete);
+ CAM_DBG(CAM_ICP, "received IPE/BPS/ ABORT: ctx_state =%d",
+ ctx_data->state);
+ break;
+ case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
+ case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
+ ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ioconfig_ack->user_data1);
+ if ((ctx_data->state == CAM_ICP_CTX_STATE_RELEASE) ||
+ (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)) {
+ complete(&ctx_data->wait_complete);
+ }
+ CAM_DBG(CAM_ICP, "received IPE/BPS/ DESTROY: ctx_state =%d",
+ ctx_data->state);
+ break;
+ case HFI_IPEBPS_CMD_OPCODE_MEM_MAP:
+ ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+ ctx_data =
+ (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
+ complete(&ctx_data->wait_complete);
+ CAM_DBG(CAM_ICP,
+ "received IPE/BPS MAP ACK:ctx_state =%d err_status =%u",
+ ctx_data->state, ioconfig_ack->err_type);
+ break;
+ case HFI_IPEBPS_CMD_OPCODE_MEM_UNMAP:
+ ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+ ctx_data =
+ (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
+ complete(&ctx_data->wait_complete);
+ CAM_DBG(CAM_ICP,
+ "received IPE/BPS UNMAP ACK:ctx_state =%d err_status =%u",
+ ctx_data->state, ioconfig_ack->err_type);
+ break;
+ default:
+ CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+ msg_ptr[ICP_PACKET_OPCODE]);
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+/*
+ * Best-effort hardware reset of BPS, IPE0 and (if present) IPE1 during
+ * recovery. Each per-device failure is only logged; the function always
+ * returns 0 so recovery proceeds regardless.
+ * NOTE(review): ipe0/bps dev_intf are dereferenced without the NULL check
+ * done elsewhere in this file - presumably safe because recovery only runs
+ * after successful init; confirm.
+ */
+static int cam_icp_ipebps_reset(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc = 0;
+ struct cam_hw_intf *ipe0_dev_intf;
+ struct cam_hw_intf *ipe1_dev_intf;
+ struct cam_hw_intf *bps_dev_intf;
+
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+
+ rc = bps_dev_intf->hw_ops.process_cmd(
+ bps_dev_intf->hw_priv,
+ CAM_ICP_BPS_CMD_RESET,
+ NULL, 0);
+ if (rc)
+ CAM_ERR(CAM_ICP, "bps reset failed");
+
+ rc = ipe0_dev_intf->hw_ops.process_cmd(
+ ipe0_dev_intf->hw_priv,
+ CAM_ICP_IPE_CMD_RESET,
+ NULL, 0);
+ if (rc)
+ CAM_ERR(CAM_ICP, "ipe0 reset failed");
+
+ if (ipe1_dev_intf) {
+ rc = ipe1_dev_intf->hw_ops.process_cmd(
+ ipe1_dev_intf->hw_priv,
+ CAM_ICP_IPE_CMD_RESET,
+ NULL, 0);
+ if (rc)
+ CAM_ERR(CAM_ICP, "ipe1 reset failed");
+ }
+
+ return 0;
+}
+
+/*
+ * Kick off firmware error recovery: dump the firmware's SFR (status/failure
+ * reason) string, reset IPE/BPS, and latch hw_mgr->recovery so the sequence
+ * runs at most once (subsequent calls return immediately). The flag is
+ * presumably cleared elsewhere once recovery completes.
+ */
+static int cam_icp_mgr_trigger_recovery(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc = 0;
+ struct sfr_buf *sfr_buffer = NULL;
+
+ CAM_DBG(CAM_ICP, "Enter");
+
+ if (atomic_read(&hw_mgr->recovery)) {
+ CAM_ERR(CAM_ICP, "Recovery is set");
+ return rc;
+ }
+
+ sfr_buffer = (struct sfr_buf *)icp_hw_mgr.hfi_mem.sfr_buf.kva;
+ CAM_WARN(CAM_ICP, "SFR:%s", sfr_buffer->msg);
+
+ cam_icp_ipebps_reset(hw_mgr);
+
+ atomic_set(&hw_mgr->recovery, 1);
+ CAM_DBG(CAM_ICP, "Done");
+ return rc;
+}
+/*
+ * Handle an EVENT_NOTIFY message from firmware. Only HFI_EVENT_SYS_ERROR
+ * triggers action (recovery); all other events are just logged at debug
+ * level. Returns the recovery rc, 0 otherwise, -EINVAL for a null message.
+ */
+static int cam_icp_mgr_process_fatal_error(
+ struct cam_icp_hw_mgr *hw_mgr, uint32_t *msg_ptr)
+{
+ struct hfi_msg_event_notify *event_notify;
+ int rc = 0;
+
+ CAM_DBG(CAM_ICP, "Enter");
+
+ event_notify = (struct hfi_msg_event_notify *)msg_ptr;
+ if (!event_notify) {
+ CAM_ERR(CAM_ICP, "Empty event message");
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ICP, "evt_id: %u evt_data1: %u evt_data2: %u",
+ event_notify->event_id,
+ event_notify->event_data1,
+ event_notify->event_data2);
+
+ if (event_notify->event_id == HFI_EVENT_SYS_ERROR) {
+ CAM_INFO(CAM_ICP, "received HFI_EVENT_SYS_ERROR");
+ rc = cam_icp_mgr_trigger_recovery(hw_mgr);
+ }
+
+ return rc;
+}
+
+/*
+ * Drain the firmware debug queue (Q_DBG) and forward each SYS_DEBUG packet's
+ * text to the cam_icp_fw_dbg tracepoint. Packets of other types are skipped
+ * but still advance by their declared size. Iteration stops once the total
+ * processed size reaches what hfi_read_message() reported.
+ * NOTE(review): the timestamp >>16 and /2 scaling mirror firmware tick
+ * format - units not derivable from this file; confirm against HFI spec.
+ */
+static void cam_icp_mgr_process_dbg_buf(void)
+{
+ uint32_t *msg_ptr = NULL, *pkt_ptr = NULL;
+ struct hfi_msg_debug *dbg_msg;
+ uint32_t read_len, size_processed = 0;
+ uint64_t timestamp = 0;
+ char *dbg_buf;
+ int rc = 0;
+
+ rc = hfi_read_message(icp_hw_mgr.dbg_buf, Q_DBG, &read_len);
+ if (rc)
+ return;
+
+ msg_ptr = (uint32_t *)icp_hw_mgr.dbg_buf;
+ while (true) {
+ pkt_ptr = msg_ptr;
+ if (pkt_ptr[ICP_PACKET_TYPE] == HFI_MSG_SYS_DEBUG) {
+ dbg_msg = (struct hfi_msg_debug *)pkt_ptr;
+ dbg_buf = (char *)&dbg_msg->msg_data;
+ timestamp = ((((uint64_t)(dbg_msg->timestamp_hi) << 32)
+ | dbg_msg->timestamp_lo) >> 16);
+ trace_cam_icp_fw_dbg(dbg_buf, timestamp/2);
+ }
+ /* advance by the packet's own size (words) */
+ size_processed += (pkt_ptr[ICP_PACKET_SIZE] >>
+ BYTE_WORD_SHIFT);
+ if (size_processed >= read_len)
+ return;
+ msg_ptr += (pkt_ptr[ICP_PACKET_SIZE] >>
+ BYTE_WORD_SHIFT);
+ pkt_ptr = NULL;
+ dbg_msg = NULL;
+ dbg_buf = NULL;
+ }
+}
+
+/*
+ * Dispatch one packet from the firmware message queue by ICP_PACKET_TYPE
+ * and report how many bytes it consumed via @msg_processed_len (0 for an
+ * unknown type, which makes the caller abort its drain loop). Sync-style
+ * acks (INIT_DONE, PC_PREP_DONE) complete a5_complete; the rest fan out to
+ * the per-message handlers above.
+ */
+static int cam_icp_process_msg_pkt_type(
+ struct cam_icp_hw_mgr *hw_mgr,
+ uint32_t *msg_ptr,
+ uint32_t *msg_processed_len)
+{
+ int rc = 0;
+ int size_processed = 0;
+
+ switch (msg_ptr[ICP_PACKET_TYPE]) {
+ case HFI_MSG_SYS_INIT_DONE:
+ CAM_DBG(CAM_ICP, "received SYS_INIT_DONE");
+ complete(&hw_mgr->a5_complete);
+ size_processed = (
+ (struct hfi_msg_init_done *)msg_ptr)->size;
+ break;
+
+ case HFI_MSG_SYS_PC_PREP_DONE:
+ CAM_DBG(CAM_ICP, "HFI_MSG_SYS_PC_PREP_DONE is received\n");
+ complete(&hw_mgr->a5_complete);
+ size_processed = sizeof(struct hfi_msg_pc_prep_done);
+ break;
+
+ case HFI_MSG_SYS_PING_ACK:
+ CAM_DBG(CAM_ICP, "received SYS_PING_ACK");
+ rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
+ size_processed = sizeof(struct hfi_msg_ping_ack);
+ break;
+
+ case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
+ CAM_DBG(CAM_ICP, "received IPEBPS_CREATE_HANDLE_ACK");
+ rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
+ size_processed = sizeof(struct hfi_msg_create_handle_ack);
+ break;
+
+ case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK:
+ CAM_DBG(CAM_ICP, "received ASYNC_INDIRECT_ACK");
+ rc = cam_icp_mgr_process_indirect_ack_msg(msg_ptr);
+ size_processed = (
+ (struct hfi_msg_ipebps_async_ack *)msg_ptr)->size;
+ break;
+
+ case HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK:
+ CAM_DBG(CAM_ICP, "received ASYNC_DIRECT_ACK");
+ rc = cam_icp_mgr_process_direct_ack_msg(msg_ptr);
+ size_processed = (
+ (struct hfi_msg_ipebps_async_ack *)msg_ptr)->size;
+ break;
+
+ case HFI_MSG_EVENT_NOTIFY:
+ CAM_DBG(CAM_ICP, "received EVENT_NOTIFY");
+ size_processed = (
+ (struct hfi_msg_event_notify *)msg_ptr)->size;
+ rc = cam_icp_mgr_process_fatal_error(hw_mgr, msg_ptr);
+ if (rc)
+ CAM_ERR(CAM_ICP, "failed in processing evt notify");
+
+ break;
+
+ default:
+ CAM_ERR(CAM_ICP, "invalid msg : %u",
+ msg_ptr[ICP_PACKET_TYPE]);
+ rc = -EINVAL;
+ break;
+ }
+
+ *msg_processed_len = size_processed;
+ return rc;
+}
+
+/*
+ * Workqueue callback scheduled from the A5 IRQ: drains the firmware message
+ * queue packet-by-packet (a zero processed length aborts the loop to avoid
+ * spinning on a malformed packet), then drains the debug queue when queue
+ * mode logging is active, and finally triggers recovery if the interrupt
+ * status carried a watchdog bite (A5_WDT_0/1).
+ * NOTE(review): per-packet dispatch errors are not accumulated - only
+ * loop-abort and recovery rc reach the caller.
+ */
+static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
+{
+ uint32_t read_len, msg_processed_len;
+ uint32_t *msg_ptr = NULL;
+ struct hfi_msg_work_data *task_data;
+ struct cam_icp_hw_mgr *hw_mgr;
+ int rc = 0;
+
+ if (!data || !priv) {
+ CAM_ERR(CAM_ICP, "Invalid data");
+ return -EINVAL;
+ }
+
+ task_data = data;
+ hw_mgr = priv;
+
+ rc = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG, &read_len);
+ if (rc) {
+ CAM_DBG(CAM_ICP, "Unable to read msg q rc %d", rc);
+ } else {
+ /* read_len arrives in words; convert to bytes */
+ read_len = read_len << BYTE_WORD_SHIFT;
+ msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
+ while (true) {
+ cam_icp_process_msg_pkt_type(hw_mgr, msg_ptr,
+ &msg_processed_len);
+
+ if (!msg_processed_len) {
+ CAM_ERR(CAM_ICP, "Failed to read");
+ rc = -EINVAL;
+ break;
+ }
+
+ read_len -= msg_processed_len;
+ if (read_len > 0) {
+ msg_ptr += (msg_processed_len >>
+ BYTE_WORD_SHIFT);
+ msg_processed_len = 0;
+ } else {
+ break;
+ }
+ }
+ }
+
+ if (icp_hw_mgr.a5_debug_type ==
+ HFI_DEBUG_MODE_QUEUE)
+ cam_icp_mgr_process_dbg_buf();
+
+ if ((task_data->irq_status & A5_WDT_0) ||
+ (task_data->irq_status & A5_WDT_1)) {
+ CAM_ERR_RATE_LIMIT(CAM_ICP, "watch dog interrupt from A5");
+
+ rc = cam_icp_mgr_trigger_recovery(hw_mgr);
+ }
+
+ return rc;
+}
+
+/*
+ * A5 interrupt callback (runs in IRQ context): grabs a workq task under the
+ * hw_mgr spinlock, stashes the irq status, and defers all real message
+ * processing to cam_icp_mgr_process_msg() on the msg workqueue.
+ * Returns 0, -ENOMEM if no free task is available.
+ */
+int32_t cam_icp_hw_mgr_cb(uint32_t irq_status, void *data)
+{
+ int32_t rc = 0;
+ unsigned long flags;
+ struct cam_icp_hw_mgr *hw_mgr = data;
+ struct crm_workq_task *task;
+ struct hfi_msg_work_data *task_data;
+
+ if (!data) {
+ CAM_ERR(CAM_ICP, "irq cb data is NULL");
+ return rc;
+ }
+
+ spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
+ if (!task) {
+ CAM_ERR(CAM_ICP, "no empty task");
+ spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+ return -ENOMEM;
+ }
+
+ task_data = (struct hfi_msg_work_data *)task->payload;
+ task_data->data = hw_mgr;
+ task_data->irq_status = irq_status;
+ task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+ task->process_cb = cam_icp_mgr_process_msg;
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+ return rc;
+}
+
+/*
+ * cam_icp_free_hfi_mem() - release every HFI memory region owned by
+ * icp_hw_mgr: firmware carveout, secondary heap, QDSS region and the
+ * five shared queues/buffers. Mirrors cam_icp_allocate_hfi_mem().
+ * Failures are logged but not propagated (void teardown path).
+ */
+static void cam_icp_free_hfi_mem(void)
+{
+ int rc;
+
+ cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
+ rc = cam_mem_mgr_free_memory_region(&icp_hw_mgr.hfi_mem.sec_heap);
+ if (rc)
+ CAM_ERR(CAM_ICP, "failed to unreserve sec heap");
+
+ cam_smmu_dealloc_qdss(icp_hw_mgr.iommu_hdl);
+ cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
+ cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+ cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
+ cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+ cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+}
+
+/*
+ * cam_icp_alloc_secheap_mem() - reserve the SMMU secondary-heap region
+ * for the ICP and return its descriptor in @secheap.
+ *
+ * Queries the SECHEAP region size from the SMMU driver, then reserves
+ * the whole region (size = iova_len) through the mem manager.
+ *
+ * Returns 0 on success, negative errno otherwise; @secheap is only
+ * written on success.
+ */
+static int cam_icp_alloc_secheap_mem(struct cam_mem_mgr_memory_desc *secheap)
+{
+ int rc;
+ struct cam_mem_mgr_request_desc alloc;
+ struct cam_mem_mgr_memory_desc out;
+ struct cam_smmu_region_info secheap_info;
+
+ memset(&alloc, 0, sizeof(alloc));
+ memset(&out, 0, sizeof(out));
+
+ rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+ CAM_SMMU_REGION_SECHEAP,
+ &secheap_info);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to get secheap memory info");
+ return rc;
+ }
+
+ alloc.size = secheap_info.iova_len;
+ alloc.align = 0;
+ alloc.flags = 0;
+ alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+ rc = cam_mem_mgr_reserve_memory_region(&alloc,
+ CAM_SMMU_REGION_SECHEAP,
+ &out);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to reserve secheap memory");
+ return rc;
+ }
+
+ *secheap = out;
+ CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+ out.kva, out.iova, out.mem_handle, out.len);
+
+ return rc;
+}
+
+/*
+ * cam_icp_alloc_sfr_mem() - allocate the 8 KiB SFR (subsystem failure
+ * reason) buffer shared with the A5 and return its descriptor in @sfr.
+ *
+ * Returns 0 on success, negative errno otherwise; @sfr is only written
+ * on success.
+ */
+static int cam_icp_alloc_sfr_mem(struct cam_mem_mgr_memory_desc *sfr)
+{
+ int rc;
+ struct cam_mem_mgr_request_desc req;
+ struct cam_mem_mgr_memory_desc result;
+
+ memset(&req, 0, sizeof(req));
+ memset(&result, 0, sizeof(result));
+
+ req.size = SZ_8K;
+ req.align = 0;
+ req.flags = CAM_MEM_FLAG_HW_READ_WRITE |
+ CAM_MEM_FLAG_HW_SHARED_ACCESS;
+ req.smmu_hdl = icp_hw_mgr.iommu_hdl;
+
+ rc = cam_mem_mgr_request_mem(&req, &result);
+ if (rc)
+ return rc;
+
+ *sfr = result;
+ CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+ result.kva, result.iova, result.mem_handle, result.len);
+
+ return rc;
+}
+
+/*
+ * cam_icp_alloc_shared_mem() - allocate one 1 MiB HW-shared buffer
+ * (used for the queue table and each HFI queue) and return its
+ * descriptor in @qtbl.
+ *
+ * Returns 0 on success, negative errno otherwise; @qtbl is only
+ * written on success.
+ */
+static int cam_icp_alloc_shared_mem(struct cam_mem_mgr_memory_desc *qtbl)
+{
+ int rc;
+ struct cam_mem_mgr_request_desc req;
+ struct cam_mem_mgr_memory_desc result;
+
+ memset(&req, 0, sizeof(req));
+ memset(&result, 0, sizeof(result));
+
+ req.size = SZ_1M;
+ req.align = 0;
+ req.flags = CAM_MEM_FLAG_HW_READ_WRITE |
+ CAM_MEM_FLAG_HW_SHARED_ACCESS;
+ req.smmu_hdl = icp_hw_mgr.iommu_hdl;
+
+ rc = cam_mem_mgr_request_mem(&req, &result);
+ if (rc)
+ return rc;
+
+ *qtbl = result;
+ CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+ result.kva, result.iova, result.mem_handle, result.len);
+
+ return rc;
+}
+
+/*
+ * cam_icp_allocate_fw_mem() - map the firmware carveout through the
+ * SMMU and record kva/iova/len in icp_hw_mgr.hfi_mem.fw_buf.
+ *
+ * Any failure from cam_smmu_alloc_firmware() is collapsed to -ENOMEM.
+ * NOTE(review): kvaddr (uintptr_t) is printed with %zX and iova
+ * (dma_addr_t) with %llx — width-correct only on some configs; the
+ * kernel-preferred specifier for dma_addr_t is %pad. Confirm before
+ * changing the format string.
+ */
+static int cam_icp_allocate_fw_mem(void)
+{
+ int rc;
+ uintptr_t kvaddr;
+ size_t len;
+ dma_addr_t iova;
+
+ rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
+ &iova, &kvaddr, &len);
+ if (rc)
+ return -ENOMEM;
+
+ icp_hw_mgr.hfi_mem.fw_buf.len = len;
+ icp_hw_mgr.hfi_mem.fw_buf.kva = kvaddr;
+ icp_hw_mgr.hfi_mem.fw_buf.iova = iova;
+ icp_hw_mgr.hfi_mem.fw_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
+
+ CAM_DBG(CAM_ICP, "kva: %zX, iova: %llx, len: %zu",
+ kvaddr, iova, len);
+
+ return rc;
+}
+
+/*
+ * cam_icp_allocate_qdss_mem() - map the QDSS (debug/trace) region
+ * through the SMMU and record iova/len in icp_hw_mgr.hfi_mem.qdss_buf.
+ * No kernel mapping is created for this region (iova only).
+ *
+ * Returns 0 on success or the SMMU driver's errno.
+ */
+static int cam_icp_allocate_qdss_mem(void)
+{
+ int rc;
+ size_t len;
+ dma_addr_t iova;
+
+ rc = cam_smmu_alloc_qdss(icp_hw_mgr.iommu_hdl,
+ &iova, &len);
+ if (rc)
+ return rc;
+
+ icp_hw_mgr.hfi_mem.qdss_buf.len = len;
+ icp_hw_mgr.hfi_mem.qdss_buf.iova = iova;
+ icp_hw_mgr.hfi_mem.qdss_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
+
+ CAM_DBG(CAM_ICP, "iova: %llx, len: %zu", iova, len);
+
+ return rc;
+}
+
+/*
+ * cam_icp_allocate_hfi_mem() - allocate every memory region the HFI
+ * layer needs before firmware boot: FW carveout, QDSS, queue table,
+ * cmd/msg/dbg queues, SFR buffer and secondary heap.
+ *
+ * On any failure the goto ladder unwinds exactly the allocations made
+ * so far, in reverse order; cam_icp_free_hfi_mem() is the matching
+ * full-teardown for the success case.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int cam_icp_allocate_hfi_mem(void)
+{
+ int rc;
+
+ rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+ CAM_SMMU_REGION_SHARED,
+ &icp_hw_mgr.hfi_mem.shmem);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to get shared memory info");
+ return rc;
+ }
+
+ rc = cam_icp_allocate_fw_mem();
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate FW memory");
+ return rc;
+ }
+
+ rc = cam_icp_allocate_qdss_mem();
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate qdss memory");
+ goto fw_alloc_failed;
+ }
+
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate qtbl memory");
+ goto qtbl_alloc_failed;
+ }
+
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate cmd q memory");
+ goto cmd_q_alloc_failed;
+ }
+
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate msg q memory");
+ goto msg_q_alloc_failed;
+ }
+
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate dbg q memory");
+ goto dbg_q_alloc_failed;
+ }
+
+ rc = cam_icp_alloc_sfr_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate sfr buffer");
+ goto sfr_buf_alloc_failed;
+ }
+
+ rc = cam_icp_alloc_secheap_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate sec heap memory");
+ goto sec_heap_alloc_failed;
+ }
+
+ return rc;
+sec_heap_alloc_failed:
+ cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+sfr_buf_alloc_failed:
+ cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+dbg_q_alloc_failed:
+ cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
+msg_q_alloc_failed:
+ cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+cmd_q_alloc_failed:
+ cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
+qtbl_alloc_failed:
+ cam_smmu_dealloc_qdss(icp_hw_mgr.iommu_hdl);
+fw_alloc_failed:
+ cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_get_free_ctx() - claim the first FREE context slot,
+ * marking it IN_USE under that slot's ctx_mutex.
+ *
+ * Returns the claimed index, or CAM_ICP_CTX_MAX when every slot is
+ * busy (callers must treat the sentinel as "no context available").
+ */
+static int cam_icp_mgr_get_free_ctx(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int i = 0;
+
+ for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+ mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
+ if (hw_mgr->ctx_data[i].state == CAM_ICP_CTX_STATE_FREE) {
+ hw_mgr->ctx_data[i].state = CAM_ICP_CTX_STATE_IN_USE;
+ mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+ break;
+ }
+ mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+ }
+
+ return i;
+}
+
+/*
+ * cam_icp_mgr_put_ctx() - return a context slot to the FREE state.
+ * NOTE(review): unlike get_free_ctx this does not take ctx_mutex;
+ * presumably the caller already holds it — confirm at call sites.
+ */
+static void cam_icp_mgr_put_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ ctx_data->state = CAM_ICP_CTX_STATE_FREE;
+}
+
+/*
+ * cam_icp_mgr_send_pc_prep() - ask the A5 firmware to prepare for
+ * power collapse and block (up to 5 s) for its PC_PREP_DONE ack.
+ *
+ * Fixes vs. previous revision: the debug log wrongly said "HFI init"
+ * (copy-paste from send_fw_init), and the wait used the global
+ * icp_hw_mgr.a5_complete while reinit_completion() used the passed-in
+ * hw_mgr — both now consistently use @hw_mgr.
+ *
+ * Returns 0 on ack, -ETIMEDOUT on timeout, or the process_cmd errno.
+ */
+static int cam_icp_mgr_send_pc_prep(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
+
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
+ return -EINVAL;
+ }
+
+ reinit_completion(&hw_mgr->a5_complete);
+ CAM_DBG(CAM_ICP, "Sending PC prep command");
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv, CAM_ICP_A5_CMD_PC_PREP, NULL, 0);
+ if (rc)
+ return rc;
+
+ CAM_DBG(CAM_ICP, "Wait for PC_PREP_DONE Message\n");
+ rem_jiffies = wait_for_completion_timeout(&hw_mgr->a5_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ CAM_ERR(CAM_ICP, "PC_PREP response timed out %d\n", rc);
+ }
+ CAM_DBG(CAM_ICP, "Done Waiting for PC_PREP Message\n");
+
+ return rc;
+}
+
+/*
+ * cam_ipe_bps_deint() - deinit (power down) the IPE and BPS cores if
+ * their clocks are currently on, then clear both clk_state flags.
+ * Idempotent: a second call is a no-op because the flags are false.
+ *
+ * Always returns 0 — a missing dev intf is logged, not propagated.
+ */
+static int cam_ipe_bps_deint(struct cam_icp_hw_mgr *hw_mgr)
+{
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+ if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+ return 0;
+ }
+
+ /* Second IPE core is optional (single-IPE targets). */
+ if (ipe1_dev_intf && hw_mgr->ipe_clk_state) {
+ ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
+ NULL, 0);
+ }
+
+ if (hw_mgr->ipe_clk_state)
+ ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ if (hw_mgr->bps_clk_state)
+ bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+
+
+ hw_mgr->bps_clk_state = false;
+ hw_mgr->ipe_clk_state = false;
+
+ return 0;
+}
+
+/*
+ * cam_icp_mgr_hw_close_u() - close entry point used from userspace
+ * paths: takes hw_mgr_mutex around cam_icp_mgr_hw_close() (the _k
+ * variant assumes the caller already serialises).
+ *
+ * Returns 0 on a NULL manager (logged), else the close result.
+ */
+static int cam_icp_mgr_hw_close_u(void *hw_priv, void *hw_close_args)
+{
+ int rc;
+ struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+
+ CAM_DBG(CAM_ICP, "UMD calls close");
+ if (!hw_mgr) {
+ CAM_ERR(CAM_ICP, "Null hw mgr");
+ return 0;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ rc = cam_icp_mgr_hw_close(hw_mgr, NULL);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_hw_close_k() - close entry point for kernel-internal
+ * paths. Unlike the _u variant it takes no hw_mgr_mutex —
+ * NOTE(review): presumably callers already hold it; confirm at the
+ * power-collapse/recovery call sites.
+ */
+static int cam_icp_mgr_hw_close_k(void *hw_priv, void *hw_close_args)
+{
+ struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+
+ CAM_DBG(CAM_ICP, "KMD calls close");
+ if (!hw_mgr) {
+ CAM_ERR(CAM_ICP, "Null hw mgr");
+ return 0;
+ }
+
+ return cam_icp_mgr_hw_close(hw_mgr, NULL);
+
+}
+
+/*
+ * cam_icp_mgr_icp_power_collapse() - power down the A5/ICP.
+ *
+ * Two paths:
+ *  - PC disabled or recovery in progress: stop the CPU immediately and
+ *    do a full HW close (firmware will be re-downloaded on resume);
+ *  - PC enabled: send PC_PREP so firmware saves state, then stop the
+ *    CPU (lighter resume via cam_icp_mgr_hfi_resume()).
+ * In both cases the A5 device is deinited at the end.
+ */
+static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+
+ CAM_DBG(CAM_ICP, "ENTER");
+
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
+ return -EINVAL;
+ }
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+ if (!hw_mgr->icp_pc_flag || atomic_read(&hw_mgr->recovery)) {
+ cam_hfi_disable_cpu(
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+ rc = cam_icp_mgr_hw_close_k(hw_mgr, NULL);
+ } else {
+ CAM_DBG(CAM_PERF, "Sending PC prep ICP PC enabled");
+ rc = cam_icp_mgr_send_pc_prep(hw_mgr);
+ cam_hfi_disable_cpu(
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+ }
+ a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+ CAM_DBG(CAM_ICP, "EXIT");
+
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_hfi_resume() - rebuild the hfi_mem_info view of the
+ * already-allocated HFI buffers and hand it to cam_hfi_resume() after
+ * a power collapse (buffers persist across PC; only the HFI layer
+ * needs re-pointing). Mirrors cam_icp_mgr_hfi_init().
+ */
+static int cam_icp_mgr_hfi_resume(struct cam_icp_hw_mgr *hw_mgr)
+{
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct hfi_mem_info hfi_mem;
+
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
+ return -EINVAL;
+ }
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+ hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
+ hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
+ hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
+ CAM_DBG(CAM_ICP, "qtbl kva = %llX IOVA = %X length = %lld\n",
+ hfi_mem.qtbl.kva, hfi_mem.qtbl.iova, hfi_mem.qtbl.len);
+
+ hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
+ hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
+ hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
+ CAM_DBG(CAM_ICP, "cmd_q kva = %llX IOVA = %X length = %lld\n",
+ hfi_mem.cmd_q.kva, hfi_mem.cmd_q.iova, hfi_mem.cmd_q.len);
+
+ hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
+ hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
+ hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
+ CAM_DBG(CAM_ICP, "msg_q kva = %llX IOVA = %X length = %lld\n",
+ hfi_mem.msg_q.kva, hfi_mem.msg_q.iova, hfi_mem.msg_q.len);
+
+ hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
+ hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
+ hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
+ CAM_DBG(CAM_ICP, "dbg_q kva = %llX IOVA = %X length = %lld\n",
+ hfi_mem.dbg_q.kva, hfi_mem.dbg_q.iova, hfi_mem.dbg_q.len);
+
+ hfi_mem.sfr_buf.kva = icp_hw_mgr.hfi_mem.sfr_buf.kva;
+ hfi_mem.sfr_buf.iova = icp_hw_mgr.hfi_mem.sfr_buf.iova;
+ hfi_mem.sfr_buf.len = icp_hw_mgr.hfi_mem.sfr_buf.len;
+ CAM_DBG(CAM_ICP, "sfr kva = %llX IOVA = %X length = %lld\n",
+ hfi_mem.sfr_buf.kva, hfi_mem.sfr_buf.iova,
+ hfi_mem.sfr_buf.len);
+
+ hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
+ hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
+ hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
+
+ hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+ hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
+ hfi_mem.qdss.iova = icp_hw_mgr.hfi_mem.qdss_buf.iova;
+ hfi_mem.qdss.len = icp_hw_mgr.hfi_mem.qdss_buf.len;
+ return cam_hfi_resume(&hfi_mem,
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
+ hw_mgr->a5_jtag_debug);
+}
+
+/*
+ * cam_icp_mgr_abort_handle() - send an IPE/BPS ABORT command for the
+ * context's fw_handle and wait (100 ms) for the firmware ack.
+ *
+ * The command packet is sized for the direct payload (flexible-array
+ * idiom: async header + abort body - placeholder payload bytes).
+ * hfi_write_cmd() copies the packet, so it is freed on every path.
+ *
+ * Returns 0 on ack, -ENOMEM / -ETIMEDOUT / write errno otherwise.
+ */
+static int cam_icp_mgr_abort_handle(
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int rc = 0;
+ unsigned long rem_jiffies;
+ size_t packet_size;
+ int timeout = 100;
+ struct hfi_cmd_ipebps_async *abort_cmd;
+
+ packet_size =
+ sizeof(struct hfi_cmd_ipebps_async) +
+ sizeof(struct hfi_cmd_abort) -
+ sizeof(((struct hfi_cmd_ipebps_async *)0)->payload.direct);
+ abort_cmd = kzalloc(packet_size, GFP_KERNEL);
+ CAM_DBG(CAM_ICP, "abort pkt size = %d", (int) packet_size);
+ if (!abort_cmd) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ abort_cmd->size = packet_size;
+ abort_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ abort_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_ABORT;
+ else
+ abort_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_ABORT;
+
+ reinit_completion(&ctx_data->wait_complete);
+ abort_cmd->num_fw_handles = 1;
+ abort_cmd->fw_handles[0] = ctx_data->fw_handle;
+ abort_cmd->user_data1 = PTR_TO_U64(ctx_data);
+ abort_cmd->user_data2 = (uint64_t)0x0;
+
+ rc = hfi_write_cmd(abort_cmd);
+ if (rc) {
+ kfree(abort_cmd);
+ return rc;
+ }
+ CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
+ ctx_data->fw_handle, ctx_data);
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ CAM_ERR(CAM_ICP, "FW timeout/err in abort handle command");
+ cam_hfi_queue_dump();
+ }
+
+ kfree(abort_cmd);
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_destroy_handle() - send an IPE/BPS DESTROY command for
+ * the context's fw_handle and wait (100 ms) for the firmware ack.
+ * Companion to cam_icp_mgr_abort_handle(); same packet-sizing idiom.
+ * ctx_data->temp_payload is copied into the direct payload.
+ *
+ * Returns 0 on ack, -ENOMEM / -ETIMEDOUT / write errno otherwise.
+ */
+static int cam_icp_mgr_destroy_handle(
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int rc = 0;
+ int timeout = 100;
+ unsigned long rem_jiffies;
+ size_t packet_size;
+ struct hfi_cmd_ipebps_async *destroy_cmd;
+
+ packet_size =
+ sizeof(struct hfi_cmd_ipebps_async) +
+ sizeof(struct hfi_cmd_abort_destroy) -
+ sizeof(((struct hfi_cmd_ipebps_async *)0)->payload.direct);
+ destroy_cmd = kzalloc(packet_size, GFP_KERNEL);
+ if (!destroy_cmd) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ destroy_cmd->size = packet_size;
+ destroy_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ destroy_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY;
+ else
+ destroy_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY;
+
+ reinit_completion(&ctx_data->wait_complete);
+ destroy_cmd->num_fw_handles = 1;
+ destroy_cmd->fw_handles[0] = ctx_data->fw_handle;
+ destroy_cmd->user_data1 = PTR_TO_U64(ctx_data);
+ destroy_cmd->user_data2 = (uint64_t)0x0;
+ memcpy(destroy_cmd->payload.direct, &ctx_data->temp_payload,
+ sizeof(uint64_t));
+
+ rc = hfi_write_cmd(destroy_cmd);
+ if (rc) {
+ kfree(destroy_cmd);
+ return rc;
+ }
+ CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
+ ctx_data->fw_handle, ctx_data);
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ CAM_ERR(CAM_ICP, "FW response timeout: %d for %u",
+ rc, ctx_data->ctx_id);
+ if (icp_hw_mgr.a5_debug_type ==
+ HFI_DEBUG_MODE_QUEUE)
+ cam_icp_mgr_process_dbg_buf();
+ cam_hfi_queue_dump();
+ }
+ kfree(destroy_cmd);
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_release_ctx() - fully tear down context slot @ctx_id:
+ * power-collapse its IPE/BPS vote, abort + destroy its FW handle,
+ * clean pending frames, and return the slot to FREE.
+ *
+ * Fix vs. previous revision: @ctx_id is a signed int but was only
+ * checked against the upper bound, so a negative id would index
+ * ctx_data[] out of bounds; both bounds are now validated.
+ *
+ * Returns 0 (including when the slot is not ACQUIRED) or -EINVAL for
+ * an out-of-range id.
+ */
+static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
+{
+ int i = 0;
+
+ if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
+ CAM_ERR(CAM_ICP, "ctx_id is wrong: %d", ctx_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+ if (hw_mgr->ctx_data[ctx_id].state !=
+ CAM_ICP_CTX_STATE_ACQUIRED) {
+ mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+ CAM_DBG(CAM_ICP,
+ "ctx with id: %d not in right state to release: %d",
+ ctx_id, hw_mgr->ctx_data[ctx_id].state);
+ return 0;
+ }
+ cam_icp_mgr_ipe_bps_power_collapse(hw_mgr,
+ &hw_mgr->ctx_data[ctx_id], 0);
+ hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_RELEASE;
+ CAM_DBG(CAM_ICP, "E: ctx_id = %d recovery = %d",
+ ctx_id, atomic_read(&hw_mgr->recovery));
+ cam_icp_mgr_abort_handle(&hw_mgr->ctx_data[ctx_id]);
+ cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id]);
+ cam_icp_mgr_cleanup_ctx(&hw_mgr->ctx_data[ctx_id]);
+
+ hw_mgr->ctx_data[ctx_id].fw_handle = 0;
+ hw_mgr->ctx_data[ctx_id].scratch_mem_size = 0;
+ for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+ clear_bit(i, hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+ kfree(hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+ hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap = NULL;
+ cam_icp_hw_mgr_clk_info_update(hw_mgr, &hw_mgr->ctx_data[ctx_id]);
+ hw_mgr->ctx_data[ctx_id].clk_info.curr_fc = 0;
+ hw_mgr->ctx_data[ctx_id].clk_info.base_clk = 0;
+ hw_mgr->ctxt_cnt--;
+ kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+ hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+ hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_FREE;
+ cam_icp_ctx_timer_stop(&hw_mgr->ctx_data[ctx_id]);
+ mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+
+ CAM_DBG(CAM_ICP, "X: ctx_id = %d", ctx_id);
+ return 0;
+}
+
+/*
+ * cam_icp_mgr_device_deinit() - unconditionally deinit every camera
+ * DSP core (IPE1 if present, IPE0, BPS, then A5 last) and clear the
+ * clock-state flags. Reverse of cam_icp_mgr_device_init().
+ */
+static void cam_icp_mgr_device_deinit(struct cam_icp_hw_mgr *hw_mgr)
+{
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+
+ if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+ return;
+ }
+
+ if (ipe1_dev_intf)
+ ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv, NULL, 0);
+ ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+ a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+ hw_mgr->bps_clk_state = false;
+ hw_mgr->ipe_clk_state = false;
+}
+
+/*
+ * cam_icp_mgr_hw_close() - full shutdown of the ICP: nullify the FW
+ * buffer in the A5 device, deinit the HFI layer, deregister the IRQ
+ * callback, free all HFI memory and clear fw_download.
+ * Idempotent: returns 0 immediately if firmware was never downloaded.
+ * Callers are expected to serialise (see _u/_k wrappers).
+ */
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
+{
+ struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct cam_icp_a5_set_irq_cb irq_cb;
+ struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+ int rc = 0;
+
+ CAM_DBG(CAM_ICP, "E");
+ if (hw_mgr->fw_download == false) {
+ CAM_DBG(CAM_ICP, "hw mgr is already closed");
+ return 0;
+ }
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_DBG(CAM_ICP, "a5_dev_intf is NULL");
+ return -EINVAL;
+ }
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+ /* Zeroed buf info tells the A5 layer to forget the FW mapping. */
+ fw_buf_info.kva = 0;
+ fw_buf_info.iova = 0;
+ fw_buf_info.len = 0;
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv,
+ CAM_ICP_A5_CMD_SET_FW_BUF,
+ &fw_buf_info,
+ sizeof(fw_buf_info));
+ if (rc)
+ CAM_ERR(CAM_ICP, "nullify the fw buf failed");
+ cam_hfi_deinit(
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+ irq_cb.icp_hw_mgr_cb = NULL;
+ irq_cb.data = NULL;
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv,
+ CAM_ICP_A5_SET_IRQ_CB,
+ &irq_cb, sizeof(irq_cb));
+ if (rc)
+ CAM_ERR(CAM_ICP, "deregister irq call back failed");
+
+ cam_icp_free_hfi_mem();
+ hw_mgr->fw_download = false;
+
+ CAM_DBG(CAM_ICP, "Exit");
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_device_init() - init (power up) the camera DSP cores in
+ * order A5 → BPS → IPE0 → IPE1 (optional). On failure the goto ladder
+ * deinits exactly the cores already initialised, in reverse order.
+ *
+ * Returns 0 on success with both clk_state flags set, else errno.
+ */
+static int cam_icp_mgr_device_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc = 0;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
+
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+ ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+ bps_dev_intf = hw_mgr->bps_dev_intf;
+
+ if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong");
+ return -EINVAL;
+ }
+
+ rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
+ if (rc)
+ goto a5_dev_init_failed;
+
+ rc = bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
+ if (rc)
+ goto bps_dev_init_failed;
+
+ rc = ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
+ if (rc)
+ goto ipe0_dev_init_failed;
+
+ if (ipe1_dev_intf) {
+ rc = ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
+ NULL, 0);
+ if (rc)
+ goto ipe1_dev_init_failed;
+ }
+
+ hw_mgr->bps_clk_state = true;
+ hw_mgr->ipe_clk_state = true;
+
+ return rc;
+ipe1_dev_init_failed:
+ ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ hw_mgr->ipe_clk_state = false;
+ipe0_dev_init_failed:
+ bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+ hw_mgr->bps_clk_state = false;
+bps_dev_init_failed:
+ a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+a5_dev_init_failed:
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_fw_download() - register the IRQ callback, hand the FW
+ * buffer mapping to the A5 device, enable the A5 CPU and trigger the
+ * firmware download. On download failure the CPU is disabled again;
+ * the IRQ callback registration is left in place (cleared later by
+ * cam_icp_mgr_hw_close()).
+ */
+static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct cam_icp_a5_set_irq_cb irq_cb;
+ struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+ return -EINVAL;
+ }
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+ irq_cb.icp_hw_mgr_cb = cam_icp_hw_mgr_cb;
+ irq_cb.data = hw_mgr;
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv,
+ CAM_ICP_A5_SET_IRQ_CB,
+ &irq_cb, sizeof(irq_cb));
+ if (rc)
+ goto set_irq_failed;
+
+ fw_buf_info.kva = icp_hw_mgr.hfi_mem.fw_buf.kva;
+ fw_buf_info.iova = icp_hw_mgr.hfi_mem.fw_buf.iova;
+ fw_buf_info.len = icp_hw_mgr.hfi_mem.fw_buf.len;
+
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv,
+ CAM_ICP_A5_CMD_SET_FW_BUF,
+ &fw_buf_info, sizeof(fw_buf_info));
+ if (rc)
+ goto set_irq_failed;
+
+ cam_hfi_enable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv,
+ CAM_ICP_A5_CMD_FW_DOWNLOAD,
+ NULL, 0);
+ if (rc)
+ goto fw_download_failed;
+
+ return rc;
+fw_download_failed:
+ cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+set_irq_failed:
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_hfi_init() - first-time HFI bring-up: package the
+ * allocated HFI buffers into hfi_mem_info and call cam_hfi_init()
+ * (event_driven_mode = 0). Counterpart of cam_icp_mgr_hfi_resume()
+ * for the cold-boot path.
+ */
+static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct hfi_mem_info hfi_mem;
+
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+ return -EINVAL;
+ }
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+ hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
+ hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
+ hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
+
+ hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
+ hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
+ hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
+
+ hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
+ hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
+ hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
+
+ hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
+ hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
+ hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
+
+ hfi_mem.sfr_buf.kva = icp_hw_mgr.hfi_mem.sfr_buf.kva;
+ hfi_mem.sfr_buf.iova = icp_hw_mgr.hfi_mem.sfr_buf.iova;
+ hfi_mem.sfr_buf.len = icp_hw_mgr.hfi_mem.sfr_buf.len;
+
+ hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
+ hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
+ hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
+
+ hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+ hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
+ hfi_mem.qdss.iova = icp_hw_mgr.hfi_mem.qdss_buf.iova;
+ hfi_mem.qdss.len = icp_hw_mgr.hfi_mem.qdss_buf.len;
+
+ return cam_hfi_init(0, &hfi_mem,
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
+ hw_mgr->a5_jtag_debug);
+}
+
+/*
+ * cam_icp_mgr_send_fw_init() - send the HFI SYS INIT command to the
+ * freshly-downloaded firmware and block (up to 5 s) for INIT_DONE.
+ *
+ * Fix vs. previous revision: reinit_completion() targeted the passed
+ * hw_mgr's a5_complete while the wait used the global icp_hw_mgr's —
+ * both now consistently use @hw_mgr (matching send_pc_prep).
+ *
+ * Returns 0 on ack, -ETIMEDOUT on timeout, or the process_cmd errno.
+ */
+static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
+
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+ return -EINVAL;
+ }
+
+ reinit_completion(&hw_mgr->a5_complete);
+ CAM_DBG(CAM_ICP, "Sending HFI init command");
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv,
+ CAM_ICP_A5_SEND_INIT,
+ NULL, 0);
+ if (rc)
+ return rc;
+
+ rem_jiffies = wait_for_completion_timeout(&hw_mgr->a5_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+ cam_hfi_queue_dump();
+ }
+ CAM_DBG(CAM_ICP, "Done Waiting for INIT DONE Message");
+
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_hw_open_u() - open entry point for userspace paths:
+ * takes hw_mgr_mutex around cam_icp_mgr_hw_open() (the _k variant
+ * assumes the caller serialises). Returns 0 on NULL manager (logged).
+ */
+static int cam_icp_mgr_hw_open_u(void *hw_mgr_priv, void *download_fw_args)
+{
+ struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+ int rc = 0;
+
+ if (!hw_mgr) {
+ CAM_ERR(CAM_ICP, "Null hw mgr");
+ return 0;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ rc = cam_icp_mgr_hw_open(hw_mgr, download_fw_args);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_hw_open_k() - open entry point for kernel-internal
+ * callers (e.g. icp_resume); no locking here — the caller is
+ * responsible for serialisation. Returns 0 on NULL manager (logged).
+ */
+static int cam_icp_mgr_hw_open_k(void *hw_mgr_priv, void *download_fw_args)
+{
+ struct cam_icp_hw_mgr *mgr = hw_mgr_priv;
+
+ if (!mgr) {
+ CAM_ERR(CAM_ICP, "Null hw mgr");
+ return 0;
+ }
+
+ return cam_icp_mgr_hw_open(mgr, download_fw_args);
+}
+
+/*
+ * cam_icp_mgr_icp_resume() - bring the ICP back after power collapse.
+ * If firmware was fully closed, re-download it via hw_open_k (with
+ * downloadFromResume=true so hw_open skips the boot-time collapse);
+ * otherwise just re-init the A5 device and resume the HFI layer.
+ * On HFI-resume failure the ICP is collapsed again before returning.
+ */
+static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc = 0;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ bool downloadFromResume = true;
+
+ CAM_DBG(CAM_ICP, "Enter");
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5 dev intf is wrong");
+ return -EINVAL;
+ }
+
+ if (hw_mgr->fw_download == false) {
+ CAM_DBG(CAM_ICP, "Downloading FW");
+ rc = cam_icp_mgr_hw_open_k(hw_mgr, &downloadFromResume);
+ CAM_DBG(CAM_ICP, "FW Download Done Exit");
+ return rc;
+ }
+
+ rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
+ if (rc)
+ return -EINVAL;
+
+ rc = cam_icp_mgr_hfi_resume(hw_mgr);
+ if (rc)
+ goto hfi_resume_failed;
+
+ CAM_DBG(CAM_ICP, "Exit");
+ return rc;
+hfi_resume_failed:
+ cam_icp_mgr_icp_power_collapse(hw_mgr);
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_hw_open() - cold-boot the ICP: allocate HFI memory,
+ * init all devices, download firmware, init HFI and send SYS INIT.
+ * Afterwards IPE/BPS are powered back down; when called from the
+ * resume path (download_fw_args points at true) the A5 stays up,
+ * otherwise (probe/boot) the A5 is power-collapsed too.
+ *
+ * Fix vs. previous revision: cam_ipe_bps_deint() was invoked up to
+ * three times in a row (harmless only because it is idempotent);
+ * collapsed to a single call while preserving both exit paths.
+ *
+ * Returns 0 on success; on failure the goto ladder unwinds in
+ * reverse order of bring-up.
+ */
+static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args)
+{
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+ bool icp_pc = false;
+ int rc = 0;
+
+ if (!hw_mgr) {
+ CAM_ERR(CAM_ICP, "hw_mgr is NULL");
+ return -EINVAL;
+ }
+
+ if (hw_mgr->fw_download) {
+ CAM_DBG(CAM_ICP, "FW already downloaded");
+ return rc;
+ }
+
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+ return -EINVAL;
+ }
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+ rc = cam_icp_allocate_hfi_mem();
+ if (rc)
+ goto alloc_hfi_mem_failed;
+
+ rc = cam_icp_mgr_device_init(hw_mgr);
+ if (rc)
+ goto dev_init_fail;
+
+ rc = cam_icp_mgr_fw_download(hw_mgr);
+ if (rc)
+ goto fw_download_failed;
+
+ rc = cam_icp_mgr_hfi_init(hw_mgr);
+ if (rc)
+ goto hfi_init_failed;
+
+ rc = cam_icp_mgr_send_fw_init(hw_mgr);
+ if (rc)
+ goto fw_init_failed;
+
+ hw_mgr->ctxt_cnt = 0;
+ hw_mgr->fw_download = true;
+ atomic_set(&hw_mgr->recovery, 0);
+
+ CAM_INFO(CAM_ICP, "FW download done successfully");
+
+ if (download_fw_args)
+ icp_pc = *((bool *)download_fw_args);
+
+ /* IPE/BPS are powered per-request; drop them until needed. */
+ rc = cam_ipe_bps_deint(hw_mgr);
+
+ if (download_fw_args && icp_pc == true) {
+ if (hw_mgr->icp_pc_flag)
+ CAM_DBG(CAM_ICP, "deinit all clocks");
+ /* Resume path: keep the A5 running. */
+ return rc;
+ }
+
+ /* Boot/probe path: collapse the A5 until first acquire. */
+ rc = cam_icp_mgr_icp_power_collapse(hw_mgr);
+ CAM_DBG(CAM_ICP, "deinit all clocks at boot up");
+
+ return rc;
+
+fw_init_failed:
+ cam_hfi_deinit(
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+hfi_init_failed:
+ cam_hfi_disable_cpu(
+ a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+fw_download_failed:
+ cam_icp_mgr_device_deinit(hw_mgr);
+dev_init_fail:
+ cam_icp_free_hfi_mem();
+alloc_hfi_mem_failed:
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_handle_config_err() - report a failed config back to the
+ * context owner (success=false event) and free frame slot @idx.
+ * NOTE(review): reads config_args->priv as a uint64_t request id here,
+ * while the enqueue path treats priv as struct icp_frame_info whose
+ * first field is presumably request_id — confirm the layout.
+ */
+static int cam_icp_mgr_handle_config_err(
+ struct cam_hw_config_args *config_args,
+ struct cam_icp_hw_ctx_data *ctx_data,
+ int idx)
+{
+ struct cam_hw_done_event_data buf_data;
+
+ buf_data.request_id = *(uint64_t *)config_args->priv;
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);
+
+ ctx_data->hfi_frame_process.request_id[idx] = 0;
+ ctx_data->hfi_frame_process.fw_process_flag[idx] = false;
+ clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+
+ return 0;
+}
+
+/*
+ * cam_icp_mgr_enqueue_config() - queue a prepared frame-process
+ * command (already packed at hw_update_entries->addr) onto the cmd
+ * workq for cam_icp_mgr_process_cmd() to write to firmware.
+ *
+ * Fix vs. previous revision: removed the dead local hfi_cmd, which
+ * was assigned from hw_update_entries->addr but never used.
+ *
+ * Returns 0 on enqueue, -ENOMEM when no workq task is free.
+ */
+static int cam_icp_mgr_enqueue_config(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_hw_config_args *config_args)
+{
+ int rc = 0;
+ uint64_t request_id = 0;
+ struct crm_workq_task *task;
+ struct hfi_cmd_work_data *task_data;
+ struct cam_hw_update_entry *hw_update_entries;
+ struct icp_frame_info *frame_info = NULL;
+
+ frame_info = (struct icp_frame_info *)config_args->priv;
+ request_id = frame_info->request_id;
+ hw_update_entries = config_args->hw_update_entries;
+ CAM_DBG(CAM_ICP, "req_id = %lld %pK", request_id, config_args->priv);
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task) {
+ CAM_ERR(CAM_ICP, "no empty task");
+ return -ENOMEM;
+ }
+
+ task_data = (struct hfi_cmd_work_data *)task->payload;
+ task_data->data = (void *)hw_update_entries->addr;
+ task_data->request_id = request_id;
+ task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_icp_mgr_process_cmd;
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_send_config_io() - build an IPE/BPS CONFIG_IO command
+ * (indirect payload pointing at @io_buf_addr), enqueue it on the cmd
+ * workq, and block (up to 5 s) for the firmware ack.
+ *
+ * NOTE(review): ioconfig_cmd lives on this stack frame and is handed
+ * to the async workq by pointer; this is safe only because the
+ * function blocks on wait_complete until after the task has consumed
+ * it — confirm the workq always runs before the 5 s timeout frees
+ * the frame.
+ */
+static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
+ uint32_t io_buf_addr)
+{
+ int rc = 0;
+ struct hfi_cmd_work_data *task_data;
+ struct hfi_cmd_ipebps_async ioconfig_cmd;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
+ struct crm_workq_task *task;
+ uint32_t size_in_words;
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
+
+ ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
+ ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
+ else
+ ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
+
+ reinit_completion(&ctx_data->wait_complete);
+
+ ioconfig_cmd.num_fw_handles = 1;
+ ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
+ ioconfig_cmd.payload.indirect = io_buf_addr;
+ ioconfig_cmd.user_data1 = PTR_TO_U64(ctx_data);
+ ioconfig_cmd.user_data2 = (uint64_t)0x0;
+ task_data = (struct hfi_cmd_work_data *)task->payload;
+ task_data->data = (void *)&ioconfig_cmd;
+ task_data->request_id = 0;
+ task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+ task->process_cb = cam_icp_mgr_process_cmd;
+ /* First word of the packet is its byte size; convert to words. */
+ size_in_words = (*(uint32_t *)task_data->data) >> 2;
+ CAM_DBG(CAM_ICP, "size_in_words %u", size_in_words);
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
+
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+ cam_hfi_queue_dump();
+ }
+
+ return rc;
+}
+
+/*
+ * cam_icp_mgr_send_recfg_io() - enqueue a pre-built reconfig CONFIG_IO
+ * command for @req_id on the cmd workq; fire-and-forget (no wait for
+ * the firmware ack, unlike cam_icp_mgr_send_config_io()).
+ *
+ * Returns 0 on enqueue, -ENOMEM when no workq task is free.
+ */
+static int cam_icp_mgr_send_recfg_io(struct cam_icp_hw_ctx_data *ctx_data,
+ struct hfi_cmd_ipebps_async *ioconfig_cmd, uint64_t req_id)
+{
+ struct crm_workq_task *task;
+ struct hfi_cmd_work_data *wq_data;
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
+
+ wq_data = (struct hfi_cmd_work_data *)task->payload;
+ wq_data->data = (void *)ioconfig_cmd;
+ wq_data->request_id = req_id;
+ wq_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_icp_mgr_process_cmd;
+
+ return cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+}
+
+/*
+ * cam_icp_mgr_config_hw() - HW-manager "config" entry: validate the
+ * context, vote IPE/BPS clocks for the request, optionally send a
+ * reconfig CONFIG_IO, then enqueue the frame-process command.
+ * Lock order is hw_mgr_mutex then ctx_mutex (both held across the
+ * enqueue; released before return on every path).
+ * On enqueue failure the frame slot is released and the owner is
+ * notified via cam_icp_mgr_handle_config_err().
+ */
+static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
+{
+ int rc = 0;
+ int idx;
+ uint64_t req_id;
+ struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_config_args *config_args = config_hw_args;
+ struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ struct icp_frame_info *frame_info = NULL;
+
+ if (!hw_mgr || !config_args) {
+ CAM_ERR(CAM_ICP, "Invalid arguments %pK %pK",
+ hw_mgr, config_args);
+ return -EINVAL;
+ }
+
+ if (!config_args->num_hw_update_entries) {
+ CAM_ERR(CAM_ICP, "No hw update enteries are available");
+ return -EINVAL;
+ }
+
+ ctx_data = config_args->ctxt_to_hw_map;
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ mutex_lock(&ctx_data->ctx_mutex);
+ if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+ mutex_unlock(&ctx_data->ctx_mutex);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ CAM_ERR(CAM_ICP, "ctx id :%u is not in use",
+ ctx_data->ctx_id);
+ return -EINVAL;
+ }
+
+ frame_info = (struct icp_frame_info *)config_args->priv;
+ req_id = frame_info->request_id;
+ idx = cam_icp_clk_idx_from_req_id(ctx_data, req_id);
+ cam_icp_mgr_ipe_bps_clk_update(hw_mgr, ctx_data, idx);
+ ctx_data->hfi_frame_process.fw_process_flag[idx] = true;
+
+ CAM_DBG(CAM_ICP, "req_id %llu, io config %llu", req_id,
+ frame_info->io_config);
+
+ /* Non-zero io_config means the IO buffers changed for this req. */
+ if (frame_info->io_config != 0) {
+ CAM_INFO(CAM_ICP, "Send recfg io");
+ rc = cam_icp_mgr_send_recfg_io(ctx_data,
+ &frame_info->hfi_cfg_io_cmd, req_id);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Fail to send reconfig io cmd");
+ }
+
+ rc = cam_icp_mgr_enqueue_config(hw_mgr, config_args);
+ if (rc)
+ goto config_err;
+ CAM_DBG(CAM_REQ,
+ "req_id = %lld on ctx_id %u for dev %d queued to FW",
+ req_id, ctx_data->ctx_id,
+ ctx_data->icp_dev_acquire_info->dev_type);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ return 0;
+config_err:
+ cam_icp_mgr_handle_config_err(config_args, ctx_data, idx);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return rc;
+}
+
+static int cam_icp_mgr_prepare_frame_process_cmd(
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct hfi_cmd_ipebps_async *hfi_cmd,
+ uint64_t request_id,
+ uint32_t fw_cmd_buf_iova_addr)
+{
+ hfi_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
+ hfi_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS;
+ else
+ hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS;
+ hfi_cmd->num_fw_handles = 1;
+ hfi_cmd->fw_handles[0] = ctx_data->fw_handle;
+ hfi_cmd->payload.indirect = fw_cmd_buf_iova_addr;
+ hfi_cmd->user_data1 = PTR_TO_U64(ctx_data);
+ hfi_cmd->user_data2 = request_id;
+
+ CAM_DBG(CAM_ICP, "ctx_data : %pK, request_id :%lld cmd_buf %x",
+ (void *)ctx_data->context_priv, request_id,
+ fw_cmd_buf_iova_addr);
+
+ return 0;
+}
+
+static bool cam_icp_mgr_is_valid_inconfig(struct cam_packet *packet)
+{
+ int i, num_in_map_entries = 0;
+ bool in_config_valid = false;
+ struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
+ io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
+ packet->io_configs_offset/4);
+
+ for (i = 0 ; i < packet->num_io_configs; i++)
+ if (io_cfg_ptr[i].direction == CAM_BUF_INPUT)
+ num_in_map_entries++;
+
+ if (num_in_map_entries <= CAM_MAX_IN_RES) {
+ in_config_valid = true;
+ } else {
+ CAM_ERR(CAM_ICP, "In config entries(%u) more than allowed(%u)",
+ num_in_map_entries, CAM_MAX_IN_RES);
+ }
+
+ CAM_DBG(CAM_ICP, "number of in_config info: %u %u %u %u",
+ packet->num_io_configs, IPE_IO_IMAGES_MAX,
+ num_in_map_entries, CAM_MAX_IN_RES);
+
+ return in_config_valid;
+}
+
+static bool cam_icp_mgr_is_valid_outconfig(struct cam_packet *packet)
+{
+ int i, num_out_map_entries = 0;
+ bool out_config_valid = false;
+ struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
+ io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
+ packet->io_configs_offset/4);
+
+ for (i = 0 ; i < packet->num_io_configs; i++)
+ if (io_cfg_ptr[i].direction == CAM_BUF_OUTPUT)
+ num_out_map_entries++;
+
+ if (num_out_map_entries <= CAM_MAX_OUT_RES) {
+ out_config_valid = true;
+ } else {
+ CAM_ERR(CAM_ICP, "Out config entries(%u) more than allowed(%u)",
+ num_out_map_entries, CAM_MAX_OUT_RES);
+ }
+
+ CAM_DBG(CAM_ICP, "number of out_config info: %u %u %u %u",
+ packet->num_io_configs, IPE_IO_IMAGES_MAX,
+ num_out_map_entries, CAM_MAX_OUT_RES);
+
+ return out_config_valid;
+}
+
+static int cam_icp_mgr_pkt_validation(struct cam_packet *packet)
+{
+ if (((packet->header.op_code & 0xff) !=
+ CAM_ICP_OPCODE_IPE_UPDATE) &&
+ ((packet->header.op_code & 0xff) !=
+ CAM_ICP_OPCODE_BPS_UPDATE)) {
+ CAM_ERR(CAM_ICP, "Invalid Opcode in pkt: %d",
+ packet->header.op_code & 0xff);
+ return -EINVAL;
+ }
+
+ if (packet->num_io_configs > IPE_IO_IMAGES_MAX) {
+ CAM_ERR(CAM_ICP, "Invalid number of io configs: %d %d",
+ IPE_IO_IMAGES_MAX, packet->num_io_configs);
+ return -EINVAL;
+ }
+
+ if (packet->num_cmd_buf > CAM_ICP_CTX_MAX_CMD_BUFFERS) {
+ CAM_ERR(CAM_ICP, "Invalid number of cmd buffers: %d %d",
+ CAM_ICP_CTX_MAX_CMD_BUFFERS, packet->num_cmd_buf);
+ return -EINVAL;
+ }
+
+ if (!cam_icp_mgr_is_valid_inconfig(packet) ||
+ !cam_icp_mgr_is_valid_outconfig(packet)) {
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ICP, "number of cmd/patch info: %u %u %u %u",
+ packet->num_cmd_buf,
+ packet->num_io_configs, IPE_IO_IMAGES_MAX,
+ packet->num_patches);
+ return 0;
+}
+
/*
 * cam_icp_mgr_process_cmd_desc() - locate the firmware command buffer in
 * a packet and scrub stale device addresses inside it
 * @hw_mgr:               ICP hw manager (for the IOMMU handle)
 * @packet:               userspace command packet
 * @ctx_data:             context (used to pick IPE vs BPS layout)
 * @fw_cmd_buf_iova_addr: out: device (IOVA) address of the FW cmd buffer
 *
 * Walks the packet's cmd-buffer descriptors looking for CAM_CMD_BUF_FW
 * entries, resolving each to both an IOVA (handed to firmware) and a
 * kernel CPU mapping (used to zero out per-frame pointer fields so no
 * stale addresses from a previous submission leak through). The zeroed
 * structure layout differs between IPE and BPS.
 *
 * NOTE(review): if more than one CAM_CMD_BUF_FW descriptor is present,
 * each loop iteration overwrites the previous iova/cpu_addr, so only
 * the last FW buffer is used — presumably at most one is expected;
 * confirm against the userspace contract. cmd_desc[i].offset is also
 * added without being checked against the buffer length len.
 *
 * Return: 0 on success, -EINVAL if no FW buffer yielded a CPU address,
 * or the cam_mem_get_* error code.
 */
static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
	struct cam_packet *packet, struct cam_icp_hw_ctx_data *ctx_data,
	uint32_t *fw_cmd_buf_iova_addr)
{
	int rc = 0;
	int i, j, k;
	uint64_t addr;
	size_t len;
	struct cam_cmd_buf_desc *cmd_desc = NULL;
	uintptr_t cpu_addr = 0;
	struct ipe_frame_process_data *frame_process_data = NULL;
	struct bps_frame_process_data *bps_frame_process_data = NULL;
	struct frame_set *ipe_set = NULL;
	struct frame_buffer *bps_bufs = NULL;

	/* Descriptor table lives at cmd_buf_offset bytes into the payload */
	cmd_desc = (struct cam_cmd_buf_desc *)
		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);

	*fw_cmd_buf_iova_addr = 0;
	for (i = 0; i < packet->num_cmd_buf; i++) {
		if (cmd_desc[i].type == CAM_CMD_BUF_FW) {
			rc = cam_mem_get_io_buf(cmd_desc[i].mem_handle,
				hw_mgr->iommu_hdl, &addr, &len);
			if (rc) {
				CAM_ERR(CAM_ICP, "get cmd buf failed %x",
					hw_mgr->iommu_hdl);
				return rc;
			}
			*fw_cmd_buf_iova_addr = addr;
			*fw_cmd_buf_iova_addr =
				(*fw_cmd_buf_iova_addr + cmd_desc[i].offset);
			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
				&cpu_addr, &len);
			if (rc) {
				CAM_ERR(CAM_ICP, "get cmd buf failed %x",
					hw_mgr->iommu_hdl);
				/* Don't hand FW an address we can't scrub */
				*fw_cmd_buf_iova_addr = 0;
				return rc;
			}
			cpu_addr = cpu_addr + cmd_desc[i].offset;
		}
	}

	if (!cpu_addr) {
		CAM_ERR(CAM_ICP, "Invalid cpu addr");
		return -EINVAL;
	}

	if (ctx_data->icp_dev_acquire_info->dev_type !=
		CAM_ICP_RES_TYPE_BPS) {
		/* IPE (and IPE realtime) layout: clear every device
		 * address field plus all per-frameset plane pointers.
		 */
		CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
		frame_process_data = (struct ipe_frame_process_data *)cpu_addr;
		CAM_DBG(CAM_ICP, "%u %u %u", frame_process_data->max_num_cores,
			frame_process_data->target_time,
			frame_process_data->frames_in_batch);
		frame_process_data->strip_lib_out_addr = 0;
		frame_process_data->iq_settings_addr = 0;
		frame_process_data->scratch_buffer_addr = 0;
		frame_process_data->ubwc_stats_buffer_addr = 0;
		frame_process_data->cdm_buffer_addr = 0;
		frame_process_data->cdm_prog_base = 0;
		for (i = 0; i < frame_process_data->frames_in_batch; i++) {
			ipe_set = &frame_process_data->framesets[i];
			for (j = 0; j < IPE_IO_IMAGES_MAX; j++) {
				for (k = 0; k < MAX_NUM_OF_IMAGE_PLANES; k++) {
					ipe_set->buffers[j].buf_ptr[k] = 0;
					ipe_set->buffers[j].meta_buf_ptr[k] = 0;
				}
			}
		}
	} else {
		/* BPS layout: same idea, different structure */
		CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
		bps_frame_process_data =
			(struct bps_frame_process_data *)cpu_addr;
		CAM_DBG(CAM_ICP, "%u %u",
			bps_frame_process_data->max_num_cores,
			bps_frame_process_data->target_time);
		bps_frame_process_data->ubwc_stats_buffer_addr = 0;
		bps_frame_process_data->cdm_buffer_addr = 0;
		bps_frame_process_data->iq_settings_addr = 0;
		bps_frame_process_data->strip_lib_out_addr = 0;
		bps_frame_process_data->cdm_prog_addr = 0;
		for (i = 0; i < BPS_IO_IMAGES_MAX; i++) {
			bps_bufs = &bps_frame_process_data->buffers[i];
			for (j = 0; j < MAX_NUM_OF_IMAGE_PLANES; j++) {
				bps_bufs->buf_ptr[j] = 0;
				bps_bufs->meta_buf_ptr[j] = 0;
			}
		}
	}

	return rc;
}
+
+static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_packet *packet,
+ struct cam_hw_prepare_update_args *prepare_args,
+ int32_t index)
+{
+ int i, j, k, rc = 0;
+ struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+ int32_t sync_in_obj[CAM_MAX_IN_RES];
+ int32_t merged_sync_in_obj;
+
+ io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
+ packet->io_configs_offset/4);
+ prepare_args->num_out_map_entries = 0;
+ prepare_args->num_in_map_entries = 0;
+
+ for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
+ if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
+ sync_in_obj[j++] = io_cfg_ptr[i].fence;
+ prepare_args->num_in_map_entries++;
+ } else {
+ prepare_args->out_map_entries[k++].sync_id =
+ io_cfg_ptr[i].fence;
+ prepare_args->num_out_map_entries++;
+ }
+ CAM_DBG(CAM_REQ,
+ "ctx_id: %u req_id: %llu dir[%d]: %u, fence: %u resource_type = %u memh %x",
+ ctx_data->ctx_id, packet->header.request_id, i,
+ io_cfg_ptr[i].direction, io_cfg_ptr[i].fence,
+ io_cfg_ptr[i].resource_type,
+ io_cfg_ptr[i].mem_handle[0]);
+ }
+
+ if (prepare_args->num_in_map_entries > 1)
+ prepare_args->num_in_map_entries =
+ cam_common_util_remove_duplicate_arr(
+ sync_in_obj, prepare_args->num_in_map_entries);
+
+ if (prepare_args->num_in_map_entries > 1) {
+ rc = cam_sync_merge(&sync_in_obj[0],
+ prepare_args->num_in_map_entries, &merged_sync_in_obj);
+ if (rc) {
+ prepare_args->num_out_map_entries = 0;
+ prepare_args->num_in_map_entries = 0;
+ return rc;
+ }
+
+ ctx_data->hfi_frame_process.in_resource[index] =
+ merged_sync_in_obj;
+ prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
+ prepare_args->num_in_map_entries = 1;
+ CAM_DBG(CAM_REQ, "ctx_id: %u req_id: %llu Merged Sync obj: %d",
+ ctx_data->ctx_id, packet->header.request_id,
+ merged_sync_in_obj);
+ } else if (prepare_args->num_in_map_entries == 1) {
+ prepare_args->in_map_entries[0].sync_id = sync_in_obj[0];
+ prepare_args->num_in_map_entries = 1;
+ ctx_data->hfi_frame_process.in_resource[index] = 0;
+ } else {
+ CAM_ERR(CAM_ICP, "No input fences");
+ prepare_args->num_in_map_entries = 0;
+ ctx_data->hfi_frame_process.in_resource[index] = 0;
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int cam_icp_process_stream_settings(
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_cmd_mem_regions *cmd_mem_regions,
+ bool map_unmap)
+{
+ int rc = 0, i = 0;
+ size_t packet_size, map_cmd_size, len;
+ uint64_t iova;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
+ struct hfi_cmd_ipe_bps_map *map_cmd;
+ struct hfi_cmd_ipebps_async *async_direct;
+
+ map_cmd_size =
+ sizeof(struct hfi_cmd_ipe_bps_map) +
+ ((cmd_mem_regions->num_regions - 1) *
+ sizeof(struct mem_map_region_data));
+
+ map_cmd = kzalloc(map_cmd_size, GFP_KERNEL);
+ if (!map_cmd)
+ return -ENOMEM;
+
+ for (i = 0; i < cmd_mem_regions->num_regions; i++) {
+ rc = cam_mem_get_io_buf(
+ cmd_mem_regions->map_info_array[i].mem_handle,
+ icp_hw_mgr.iommu_hdl, &iova, &len);
+ if (rc) {
+ CAM_ERR(CAM_ICP,
+ "Failed to get cmd region iova for handle %u",
+ cmd_mem_regions->map_info_array[i].mem_handle);
+ kfree(map_cmd);
+ return -EINVAL;
+ }
+
+ map_cmd->mem_map_region_sets[i].start_addr = (uint32_t)iova +
+ (cmd_mem_regions->map_info_array[i].offset);
+ map_cmd->mem_map_region_sets[i].len = (uint32_t) len;
+
+ CAM_DBG(CAM_ICP, "Region %u mem_handle %d iova %pK len %u",
+ (i+1), cmd_mem_regions->map_info_array[i].mem_handle,
+ (uint32_t)iova, (uint32_t)len);
+ }
+
+ map_cmd->mem_map_request_num = cmd_mem_regions->num_regions;
+ map_cmd->user_data = 0;
+
+ packet_size =
+ sizeof(struct hfi_cmd_ipebps_async) +
+ (sizeof(struct hfi_cmd_ipe_bps_map) +
+ ((cmd_mem_regions->num_regions - 1) *
+ sizeof(struct mem_map_region_data))) -
+ sizeof(((struct hfi_cmd_ipebps_async *)0)->payload.direct);
+
+ async_direct = kzalloc(packet_size, GFP_KERNEL);
+ if (!async_direct) {
+ kfree(map_cmd);
+ return -ENOMEM;
+ }
+
+ async_direct->size = packet_size;
+ async_direct->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+ if (map_unmap)
+ async_direct->opcode = HFI_IPEBPS_CMD_OPCODE_MEM_MAP;
+ else
+ async_direct->opcode = HFI_IPEBPS_CMD_OPCODE_MEM_UNMAP;
+ async_direct->num_fw_handles = 1;
+ async_direct->fw_handles[0] = ctx_data->fw_handle;
+ async_direct->user_data1 = (uint64_t)ctx_data;
+ async_direct->user_data2 = (uint64_t)0x0;
+ memcpy(async_direct->payload.direct, map_cmd,
+ map_cmd_size);
+
+ reinit_completion(&ctx_data->wait_complete);
+ rc = hfi_write_cmd(async_direct);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "hfi write failed rc %d", rc);
+ goto end;
+ }
+
+ CAM_DBG(CAM_ICP, "Sent FW %s cmd",
+ (map_unmap == true) ? "Map" : "Unmap");
+
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+ cam_hfi_queue_dump();
+ }
+
+end:
+ kfree(map_cmd);
+ kfree(async_direct);
+ return rc;
+}
+
+static int cam_icp_packet_generic_blob_handler(void *user_data,
+ uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+ struct cam_icp_clk_bw_request *soc_req;
+ struct cam_icp_clk_bw_request *clk_info;
+ struct cam_cmd_mem_regions *cmd_mem_regions;
+ struct icp_cmd_generic_blob *blob;
+ struct cam_icp_hw_ctx_data *ctx_data;
+ uint32_t index;
+ size_t io_buf_size;
+ int rc = 0;
+ uintptr_t pResource;
+
+ if (!blob_data || (blob_size == 0)) {
+ CAM_ERR(CAM_ICP, "Invalid blob info %pK %d", blob_data,
+ blob_size);
+ return -EINVAL;
+ }
+
+ blob = (struct icp_cmd_generic_blob *)user_data;
+ ctx_data = blob->ctx;
+ index = blob->frame_info_idx;
+
+ switch (blob_type) {
+ case CAM_ICP_CMD_GENERIC_BLOB_CLK:
+ if (blob_size != sizeof(struct cam_icp_clk_bw_request)) {
+ rc = -EINVAL;
+ break;
+ }
+ clk_info = &ctx_data->hfi_frame_process.clk_info[index];
+ memset(clk_info, 0, sizeof(struct cam_icp_clk_bw_request));
+
+ soc_req = (struct cam_icp_clk_bw_request *)blob_data;
+ *clk_info = *soc_req;
+ CAM_DBG(CAM_ICP, "%llu %llu %d %d %d",
+ clk_info->budget_ns, clk_info->frame_cycles,
+ clk_info->rt_flag, clk_info->uncompressed_bw,
+ clk_info->compressed_bw);
+ break;
+
+ case CAM_ICP_CMD_GENERIC_BLOB_CFG_IO:
+ CAM_DBG(CAM_ICP, "CAM_ICP_CMD_GENERIC_BLOB_CFG_IO");
+ pResource = *((uint32_t *)blob_data);
+ if (copy_from_user(&ctx_data->icp_dev_io_info,
+ (void __user *)pResource,
+ sizeof(struct cam_icp_acquire_dev_info))) {
+ CAM_ERR(CAM_ICP, "Failed in copy from user");
+ return -EFAULT;
+ }
+ CAM_DBG(CAM_ICP, "buf handle %d",
+ ctx_data->icp_dev_io_info.io_config_cmd_handle);
+ rc = cam_mem_get_io_buf(
+ ctx_data->icp_dev_io_info.io_config_cmd_handle,
+ icp_hw_mgr.iommu_hdl,
+ blob->io_buf_addr, &io_buf_size);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed in blob update");
+ else
+ CAM_DBG(CAM_ICP, "io buf addr %llu",
+ *blob->io_buf_addr);
+ break;
+
+ case CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_MAP:
+ cmd_mem_regions =
+ (struct cam_cmd_mem_regions *)blob_data;
+ if (cmd_mem_regions->num_regions <= 0) {
+ rc = -EINVAL;
+ CAM_ERR(CAM_ICP,
+ "Invalid number of regions for FW map %u",
+ cmd_mem_regions->num_regions);
+ } else {
+ CAM_DBG(CAM_ICP,
+ "Processing blob for mapping %u regions",
+ cmd_mem_regions->num_regions);
+ rc = cam_icp_process_stream_settings(ctx_data,
+ cmd_mem_regions, true);
+ }
+ break;
+
+ case CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_UNMAP:
+ cmd_mem_regions =
+ (struct cam_cmd_mem_regions *)blob_data;
+ if (cmd_mem_regions->num_regions <= 0) {
+ rc = -EINVAL;
+ CAM_ERR(CAM_ICP,
+ "Invalid number of regions for FW unmap %u",
+ cmd_mem_regions->num_regions);
+ } else {
+ CAM_DBG(CAM_ICP,
+ "Processing blob for unmapping %u regions",
+ cmd_mem_regions->num_regions);
+ rc = cam_icp_process_stream_settings(ctx_data,
+ cmd_mem_regions, false);
+ }
+ break;
+
+ default:
+ CAM_WARN(CAM_ICP, "Invalid blob type %d", blob_type);
+ break;
+ }
+ return rc;
+}
+
+static int cam_icp_process_generic_cmd_buffer(
+ struct cam_packet *packet,
+ struct cam_icp_hw_ctx_data *ctx_data,
+ int32_t index,
+ uint64_t *io_buf_addr)
+{
+ int i, rc = 0;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ struct icp_cmd_generic_blob cmd_generic_blob;
+
+ cmd_generic_blob.ctx = ctx_data;
+ cmd_generic_blob.frame_info_idx = index;
+ cmd_generic_blob.io_buf_addr = io_buf_addr;
+
+ cmd_desc = (struct cam_cmd_buf_desc *)
+ ((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
+ for (i = 0; i < packet->num_cmd_buf; i++) {
+ if (!cmd_desc[i].length)
+ continue;
+
+ if (cmd_desc[i].meta_data != CAM_ICP_CMD_META_GENERIC_BLOB)
+ continue;
+
+ rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
+ cam_icp_packet_generic_blob_handler, &cmd_generic_blob);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed in processing blobs %d", rc);
+ }
+
+ return rc;
+}
+
+static int cam_icp_mgr_process_cfg_io_cmd(
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct hfi_cmd_ipebps_async *ioconfig_cmd,
+ uint64_t request_id,
+ uint64_t io_config)
+{
+ ioconfig_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
+ ioconfig_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ ioconfig_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
+ else
+ ioconfig_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
+
+ ioconfig_cmd->num_fw_handles = 1;
+ ioconfig_cmd->fw_handles[0] = ctx_data->fw_handle;
+ ioconfig_cmd->payload.indirect = io_config;
+ ioconfig_cmd->user_data1 = PTR_TO_U64(ctx_data);
+ ioconfig_cmd->user_data2 = request_id;
+
+ return 0;
+}
+
+static int cam_icp_mgr_update_hfi_frame_process(
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_packet *packet,
+ struct cam_hw_prepare_update_args *prepare_args,
+ int32_t *idx)
+{
+ int32_t index, rc;
+ struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
+
+ index = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
+ ctx_data->hfi_frame_process.bits);
+ if (index < 0 || index >= CAM_FRAME_CMD_MAX) {
+ CAM_ERR(CAM_ICP, "request idx is wrong: %d", index);
+ return -EINVAL;
+ }
+ set_bit(index, ctx_data->hfi_frame_process.bitmap);
+
+ ctx_data->hfi_frame_process.request_id[index] =
+ packet->header.request_id;
+ ctx_data->hfi_frame_process.frame_info[index].request_id =
+ packet->header.request_id;
+ ctx_data->hfi_frame_process.frame_info[index].io_config = 0;
+ rc = cam_icp_process_generic_cmd_buffer(packet, ctx_data, index,
+ &ctx_data->hfi_frame_process.frame_info[index].io_config);
+ if (rc) {
+ clear_bit(index, ctx_data->hfi_frame_process.bitmap);
+ ctx_data->hfi_frame_process.request_id[index] = -1;
+ return rc;
+ }
+
+ if (ctx_data->hfi_frame_process.frame_info[index].io_config) {
+ hfi_cmd = (struct hfi_cmd_ipebps_async *)&ctx_data->
+ hfi_frame_process.frame_info[index].hfi_cfg_io_cmd;
+ rc = cam_icp_mgr_process_cfg_io_cmd(ctx_data, hfi_cmd,
+ packet->header.request_id, ctx_data->
+ hfi_frame_process.frame_info[index].io_config);
+ }
+ *idx = index;
+
+ return rc;
+}
+
/*
 * cam_icp_mgr_print_io_bufs() - dump a packet's IO buffers on page fault
 * @packet:      command packet whose io_config table is walked
 * @iommu_hdl:   non-secure IOMMU handle
 * @sec_mmu_hdl: secure IOMMU handle
 * @pf_buf_info: buffer handle that faulted (matched by its fd part)
 * @mem_found:   out (optional): set true if the faulting fd was found
 *
 * Debug-only helper: logs every plane of every io_config entry and
 * flags the one whose fd matches the faulting buffer. Per-buffer
 * lookup failures are logged and skipped, never propagated.
 */
static void cam_icp_mgr_print_io_bufs(struct cam_packet *packet,
	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
	bool *mem_found)
{
	uint64_t iova_addr;
	size_t src_buf_size;
	int i;
	int j;
	int rc = 0;
	int32_t mmu_hdl;

	struct cam_buf_io_cfg *io_cfg = NULL;

	if (mem_found)
		*mem_found = false;

	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
		packet->io_configs_offset / 4);

	for (i = 0; i < packet->num_io_configs; i++) {
		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
			/* Planes are packed; first empty handle ends the row */
			if (!io_cfg[i].mem_handle[j])
				break;

			/* Match on fd: different handles can share one fd */
			if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
				GET_FD_FROM_HANDLE(pf_buf_info)) {
				CAM_INFO(CAM_ICP,
					"Found PF at port: %d mem %x fd: %x",
					io_cfg[i].resource_type,
					io_cfg[i].mem_handle[j],
					pf_buf_info);
				if (mem_found)
					*mem_found = true;
			}

			CAM_INFO(CAM_ICP, "port: %d f: %u format: %d dir %d",
				io_cfg[i].resource_type,
				io_cfg[i].fence,
				io_cfg[i].format,
				io_cfg[i].direction);

			/* Secure buffers resolve through the secure MMU */
			mmu_hdl = cam_mem_is_secure_buf(
				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
				iommu_hdl;
			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
				mmu_hdl, &iova_addr, &src_buf_size);
			if (rc < 0) {
				CAM_ERR(CAM_UTIL, "get src buf address fail");
				continue;
			}
			/* ICP expects 32-bit IOVAs; anything above is bogus */
			if (iova_addr >> 32) {
				CAM_ERR(CAM_ICP, "Invalid mapped address");
				rc = -EINVAL;
				continue;
			}

			CAM_INFO(CAM_ICP,
				"pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
				j, io_cfg[i].planes[j].width,
				io_cfg[i].planes[j].height,
				(int32_t)src_buf_size,
				(unsigned int)iova_addr,
				io_cfg[i].offsets[j],
				io_cfg[i].mem_handle[j]);

			iova_addr += io_cfg[i].offsets[j];

		}
	}
}
+
+static int cam_icp_mgr_config_stream_settings(
+ void *hw_mgr_priv, void *hw_stream_settings)
+{
+ int rc = 0;
+ struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ struct cam_packet *packet = NULL;
+ struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ struct icp_cmd_generic_blob cmd_generic_blob;
+ struct cam_hw_stream_setttings *config_args =
+ hw_stream_settings;
+
+ if ((!hw_stream_settings) ||
+ (!hw_mgr) || (!config_args->packet)) {
+ CAM_ERR(CAM_ICP, "Invalid input arguments");
+ return -EINVAL;
+ }
+
+ ctx_data = config_args->ctxt_to_hw_map;
+ mutex_lock(&ctx_data->ctx_mutex);
+ packet = config_args->packet;
+
+ cmd_generic_blob.ctx = ctx_data;
+ cmd_generic_blob.frame_info_idx = -1;
+ cmd_generic_blob.io_buf_addr = NULL;
+
+ cmd_desc = (struct cam_cmd_buf_desc *)
+ ((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
+
+ if (!cmd_desc[0].length ||
+ cmd_desc[0].meta_data != CAM_ICP_CMD_META_GENERIC_BLOB) {
+ CAM_ERR(CAM_ICP, "Invalid cmd buffer length/metadata");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[0],
+ cam_icp_packet_generic_blob_handler, &cmd_generic_blob);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed in processing cmd mem blob %d", rc);
+
+end:
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return rc;
+}
+
+static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
+ void *prepare_hw_update_args)
+{
+ int rc = 0;
+ int32_t idx;
+ uint32_t fw_cmd_buf_iova_addr;
+ struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ struct cam_packet *packet = NULL;
+ struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
+ struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_prepare_update_args *prepare_args =
+ prepare_hw_update_args;
+
+ if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
+ CAM_ERR(CAM_ICP, "Invalid args");
+ return -EINVAL;
+ }
+
+ ctx_data = prepare_args->ctxt_to_hw_map;
+ mutex_lock(&ctx_data->ctx_mutex);
+ if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+ mutex_unlock(&ctx_data->ctx_mutex);
+ CAM_ERR(CAM_ICP, "ctx id: %u is not in use",
+ ctx_data->ctx_id);
+ return -EINVAL;
+ }
+
+ packet = prepare_args->packet;
+
+ rc = cam_icp_mgr_pkt_validation(packet);
+ if (rc) {
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return rc;
+ }
+
+ rc = cam_icp_mgr_process_cmd_desc(hw_mgr, packet,
+ ctx_data, &fw_cmd_buf_iova_addr);
+ if (rc) {
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return rc;
+ }
+
+ prepare_args->pf_data->packet = packet;
+
+ CAM_DBG(CAM_REQ, "req id = %lld for ctx = %u",
+ packet->header.request_id, ctx_data->ctx_id);
+ /* Update Buffer Address from handles and patch information */
+ rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl,
+ hw_mgr->iommu_sec_hdl);
+ if (rc) {
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return rc;
+ }
+
+ rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet,
+ prepare_args, &idx);
+ if (rc) {
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return rc;
+ }
+
+ rc = cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
+ packet, prepare_args, idx);
+ if (rc) {
+ if (ctx_data->hfi_frame_process.in_resource[idx] > 0)
+ cam_sync_destroy(
+ ctx_data->hfi_frame_process.in_resource[idx]);
+ clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ ctx_data->hfi_frame_process.request_id[idx] = -1;
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return rc;
+ }
+
+ hfi_cmd = (struct hfi_cmd_ipebps_async *)
+ &ctx_data->hfi_frame_process.hfi_frame_cmd[idx];
+ cam_icp_mgr_prepare_frame_process_cmd(
+ ctx_data, hfi_cmd, packet->header.request_id,
+ fw_cmd_buf_iova_addr);
+
+ prepare_args->num_hw_update_entries = 1;
+ prepare_args->hw_update_entries[0].addr = (uintptr_t)hfi_cmd;
+ prepare_args->priv = &ctx_data->hfi_frame_process.frame_info[idx];
+
+ CAM_DBG(CAM_ICP, "X: req id = %lld ctx_id = %u",
+ packet->header.request_id, ctx_data->ctx_id);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return rc;
+}
+
+static int cam_icp_mgr_send_abort_status(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ struct hfi_frame_process_info *hfi_frame_process;
+ int idx;
+
+ mutex_lock(&ctx_data->ctx_mutex);
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+ if (!hfi_frame_process->request_id[idx])
+ continue;
+
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, true,
+ &hfi_frame_process->request_id[idx]);
+
+ /* now release memory for hfi frame process command */
+ hfi_frame_process->request_id[idx] = 0;
+ if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+ CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+ ctx_data->hfi_frame_process.in_resource[idx]);
+ cam_sync_destroy(
+ ctx_data->hfi_frame_process.in_resource[idx]);
+ ctx_data->hfi_frame_process.in_resource[idx] = 0;
+ }
+ clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ }
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return 0;
+}
+
+static int cam_icp_mgr_delete_sync(void *priv, void *data)
+{
+ struct hfi_cmd_work_data *task_data = NULL;
+ struct cam_icp_hw_ctx_data *ctx_data;
+ struct hfi_frame_process_info *hfi_frame_process;
+ int idx;
+
+ if (!data || !priv) {
+ CAM_ERR(CAM_ICP, "Invalid params%pK %pK", data, priv);
+ return -EINVAL;
+ }
+
+ task_data = (struct hfi_cmd_work_data *)data;
+ ctx_data = task_data->data;
+
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "Null Context");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx_data->ctx_mutex);
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+ if (!hfi_frame_process->in_free_resource[idx])
+ continue;
+ //cam_sync_destroy(
+ //ctx_data->hfi_frame_process.in_free_resource[idx]);
+ ctx_data->hfi_frame_process.in_resource[idx] = 0;
+ }
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return 0;
+}
+
+static int cam_icp_mgr_delete_sync_obj(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int rc = 0;
+ struct crm_workq_task *task;
+ struct hfi_cmd_work_data *task_data;
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task) {
+ CAM_ERR(CAM_ICP, "no empty task");
+ return -ENOMEM;
+ }
+
+ task_data = (struct hfi_cmd_work_data *)task->payload;
+ task_data->data = (void *)ctx_data;
+ task_data->request_id = 0;
+ task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_icp_mgr_delete_sync;
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+
+ return rc;
+}
+
+static int cam_icp_mgr_flush_all(struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_hw_flush_args *flush_args)
+{
+ struct hfi_frame_process_info *hfi_frame_process;
+ int idx;
+ bool clear_in_resource = false;
+
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+ if (!hfi_frame_process->request_id[idx])
+ continue;
+
+ /* now release memory for hfi frame process command */
+ hfi_frame_process->request_id[idx] = 0;
+ if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+ ctx_data->hfi_frame_process.in_free_resource[idx] =
+ ctx_data->hfi_frame_process.in_resource[idx];
+ ctx_data->hfi_frame_process.in_resource[idx] = 0;
+ }
+ clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ clear_in_resource = true;
+ }
+
+ if (clear_in_resource)
+ cam_icp_mgr_delete_sync_obj(ctx_data);
+
+ return 0;
+}
+
+static int cam_icp_mgr_flush_req(struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_hw_flush_args *flush_args)
+{
+ int64_t request_id;
+ struct hfi_frame_process_info *hfi_frame_process;
+ int idx;
+ bool clear_in_resource = false;
+
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ request_id = *(int64_t *)flush_args->flush_req_pending[0];
+ for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+ if (!hfi_frame_process->request_id[idx])
+ continue;
+
+ if (hfi_frame_process->request_id[idx] != request_id)
+ continue;
+
+ /* now release memory for hfi frame process command */
+ hfi_frame_process->request_id[idx] = 0;
+ if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+ ctx_data->hfi_frame_process.in_free_resource[idx] =
+ ctx_data->hfi_frame_process.in_resource[idx];
+ ctx_data->hfi_frame_process.in_resource[idx] = 0;
+ }
+ clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ clear_in_resource = true;
+ }
+
+ if (clear_in_resource)
+ cam_icp_mgr_delete_sync_obj(ctx_data);
+
+ return 0;
+}
+
+static int cam_icp_mgr_hw_flush(void *hw_priv, void *hw_flush_args)
+{
+ struct cam_hw_flush_args *flush_args = hw_flush_args;
+ struct cam_icp_hw_ctx_data *ctx_data;
+ struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+
+ if ((!hw_priv) || (!hw_flush_args)) {
+ CAM_ERR(CAM_ICP, "Input params are Null:");
+ return -EINVAL;
+ }
+
+ ctx_data = flush_args->ctxt_to_hw_map;
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "Ctx data is NULL");
+ return -EINVAL;
+ }
+
+ if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) ||
+ (flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) {
+ CAM_ERR(CAM_ICP, "Invalid lush type: %d",
+ flush_args->flush_type);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_REQ, "ctx_id %d Flush type %d",
+ ctx_data->ctx_id, flush_args->flush_type);
+
+ switch (flush_args->flush_type) {
+ case CAM_FLUSH_TYPE_ALL:
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (!atomic_read(&hw_mgr->recovery)
+ && flush_args->num_req_active) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ cam_icp_mgr_abort_handle(ctx_data);
+ } else {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ }
+ mutex_lock(&ctx_data->ctx_mutex);
+ cam_icp_mgr_flush_all(ctx_data, flush_args);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ break;
+ case CAM_FLUSH_TYPE_REQ:
+ mutex_lock(&ctx_data->ctx_mutex);
+ if (flush_args->num_req_active) {
+ CAM_ERR(CAM_ICP, "Flush request is not supported");
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return -EINVAL;
+ }
+ if (flush_args->num_req_pending)
+ cam_icp_mgr_flush_req(ctx_data, flush_args);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ break;
+ default:
+ CAM_ERR(CAM_ICP, "Invalid flush type: %d",
+ flush_args->flush_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
 * cam_icp_mgr_release_hw() - release an acquired ICP context
 * @hw_mgr_priv:     struct cam_icp_hw_mgr pointer
 * @release_hw_args: struct cam_hw_release_args
 *
 * Aborts outstanding work on the FW handle (skipped during recovery or
 * when nothing is active), releases the context slot, and on the last
 * context powers down the ICP and deinits IPE/BPS. hw_mgr_mutex is
 * deliberately dropped around the blocking abort calls.
 *
 * NOTE(review): the ACQUIRED-state check releases ctx_mutex before the
 * abort/release sequence runs, so the state could in principle change
 * in between — presumably serialized by the caller; confirm.
 *
 * Return: 0 on success or a negative error code.
 */
static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
{
	int rc = 0;
	int ctx_id = 0;
	struct cam_hw_release_args *release_hw = release_hw_args;
	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
	struct cam_icp_hw_ctx_data *ctx_data = NULL;

	if (!release_hw || !hw_mgr) {
		CAM_ERR(CAM_ICP, "Invalid args: %pK %pK", release_hw, hw_mgr);
		return -EINVAL;
	}

	CAM_DBG(CAM_ICP, "Enter recovery set %d",
		atomic_read(&hw_mgr->recovery));
	ctx_data = release_hw->ctxt_to_hw_map;
	if (!ctx_data) {
		CAM_ERR(CAM_ICP, "NULL ctx data");
		return -EINVAL;
	}

	ctx_id = ctx_data->ctx_id;
	if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
		CAM_ERR(CAM_ICP, "Invalid ctx id: %d", ctx_id);
		return -EINVAL;
	}

	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
	if (hw_mgr->ctx_data[ctx_id].state != CAM_ICP_CTX_STATE_ACQUIRED) {
		CAM_DBG(CAM_ICP, "ctx is not in use: %d", ctx_id);
		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
		return -EINVAL;
	}
	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);

	/* Abort FW work and report aborted requests, but not while a
	 * recovery is in flight and not if nothing is active.
	 */
	mutex_lock(&hw_mgr->hw_mgr_mutex);
	if (!atomic_read(&hw_mgr->recovery) && release_hw->active_req) {
		mutex_unlock(&hw_mgr->hw_mgr_mutex);
		cam_icp_mgr_abort_handle(ctx_data);
		cam_icp_mgr_send_abort_status(ctx_data);
	} else {
		mutex_unlock(&hw_mgr->hw_mgr_mutex);
	}

	mutex_lock(&hw_mgr->hw_mgr_mutex);
	rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
	if (!hw_mgr->ctxt_cnt) {
		/* Last context gone: power-collapse ICP and deinit IPE/BPS */
		CAM_DBG(CAM_ICP, "Last Release");
		cam_icp_mgr_icp_power_collapse(hw_mgr);
		cam_icp_hw_mgr_reset_clk_info(hw_mgr);
		rc = cam_ipe_bps_deint(hw_mgr);
	}
	mutex_unlock(&hw_mgr->hw_mgr_mutex);

	/* Stop the device watchdog once either engine has no contexts */
	if ((!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt))
		cam_icp_device_timer_stop(hw_mgr);

	CAM_DBG(CAM_ICP, "Release done for ctx_id %d", ctx_id);
	return rc;
}
+
+/*
+ * cam_icp_mgr_create_handle() - ask FW to create an IPE/BPS device handle
+ * @dev_type: HFI handle type (IPE/BPS resource type from acquire info)
+ * @ctx_data: context whose fw_handle will be populated by the FW response
+ *
+ * Queues an HFI CREATE_HANDLE command on the cmd workq and waits up to
+ * 5s for the FW ack (which fills ctx_data->fw_handle via the msg path).
+ *
+ * Return: 0 on success, -ENOMEM if no task, -ETIMEDOUT on no FW ack,
+ *         -EINVAL if FW answered but produced a zero handle.
+ */
+static int cam_icp_mgr_create_handle(uint32_t dev_type,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct hfi_cmd_create_handle create_handle;
+	struct hfi_cmd_work_data *task_data;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+	struct crm_workq_task *task;
+	int rc = 0;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
+
+	/* create_handle lives on the stack; valid because we wait below */
+	create_handle.size = sizeof(struct hfi_cmd_create_handle);
+	create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
+	create_handle.handle_type = dev_type;
+	create_handle.user_data1 = PTR_TO_U64(ctx_data);
+	reinit_completion(&ctx_data->wait_complete);
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&create_handle;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_hfi_queue_dump();
+	}
+
+	/* Even on timeout, a zero handle means the acquire cannot proceed */
+	if (ctx_data->fw_handle == 0) {
+		CAM_ERR(CAM_ICP, "Invalid handle created");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_send_ping() - ping the ICP FW and wait for the ack
+ * @ctx_data: context whose wait_complete is signalled by the ping ack
+ *
+ * Queues an HFI SYS_PING command on the cmd workq and waits up to 5s
+ * for the FW response.
+ *
+ * Return: 0 on ack, -ENOMEM if no free task, -ETIMEDOUT on no FW ack.
+ */
+static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct hfi_cmd_ping_pkt ping_pkt;
+	struct hfi_cmd_work_data *task_data;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+	struct crm_workq_task *task;
+	int rc = 0;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "No free task to send ping command");
+		return -ENOMEM;
+	}
+
+	/* ping_pkt lives on the stack; valid because we wait below */
+	ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
+	ping_pkt.pkt_type = HFI_CMD_SYS_PING;
+	ping_pkt.user_data = PTR_TO_U64(ctx_data);
+	/*
+	 * Fix: use reinit_completion() rather than init_completion().
+	 * The completion is initialized once with the context; calling
+	 * init_completion() on a live completion re-creates its internal
+	 * wait queue and can race with a concurrent waker. This also
+	 * matches cam_icp_mgr_create_handle().
+	 */
+	reinit_completion(&ctx_data->wait_complete);
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&ping_pkt;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_hfi_queue_dump();
+	}
+
+	return rc;
+}
+
+/*
+ * cam_icp_get_acquire_info() - copy and validate acquire info from user
+ * @hw_mgr:   ICP hw manager (unused here beyond the call convention)
+ * @args:     acquire args; args->acquire_info is a user-space pointer
+ * @ctx_data: context to own the allocated icp_dev_acquire_info copy
+ *
+ * Reads the fixed-size header first to learn num_out_res, validates
+ * secure_mode/num_out_res/dev_type, then re-reads the full structure
+ * (header + out_res array) into a kernel allocation.
+ *
+ * On success ctx_data->icp_dev_acquire_info is allocated; the caller's
+ * error path (acquire_hw) is responsible for freeing it.
+ *
+ * Return: 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
+ */
+static int cam_icp_get_acquire_info(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_hw_acquire_args *args,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+	int acquire_size;
+	struct cam_icp_acquire_dev_info icp_dev_acquire_info;
+	struct cam_icp_res_info *p_icp_out = NULL;
+
+	if (copy_from_user(&icp_dev_acquire_info,
+		(void __user *)args->acquire_info,
+		sizeof(struct cam_icp_acquire_dev_info))) {
+		CAM_ERR(CAM_ICP, "Failed in acquire");
+		return -EFAULT;
+	}
+
+	if (icp_dev_acquire_info.secure_mode > CAM_SECURE_MODE_SECURE) {
+		CAM_ERR(CAM_ICP, "Invalid mode:%d",
+			icp_dev_acquire_info.secure_mode);
+		return -EINVAL;
+	}
+
+	if (icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) {
+		CAM_ERR(CAM_ICP, "num of out resources exceeding : %u",
+			icp_dev_acquire_info.num_out_res);
+		return -EINVAL;
+	}
+
+	if (icp_dev_acquire_info.dev_type >= CAM_ICP_RES_TYPE_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid device type: %d",
+			icp_dev_acquire_info.dev_type);
+		return -EFAULT;
+	}
+
+	/*
+	 * NOTE(review): num_out_res == 0 makes (num_out_res - 1) wrap and
+	 * acquire_size smaller than the base struct — presumably the base
+	 * struct already embeds one out_res entry, but confirm whether a
+	 * zero-out-res acquire is legal and whether a lower-bound check
+	 * is needed here.
+	 */
+	acquire_size = sizeof(struct cam_icp_acquire_dev_info) +
+		((icp_dev_acquire_info.num_out_res - 1) *
+		sizeof(struct cam_icp_res_info));
+	ctx_data->icp_dev_acquire_info = kzalloc(acquire_size, GFP_KERNEL);
+	if (!ctx_data->icp_dev_acquire_info)
+		return -ENOMEM;
+
+	/* Second copy: header again plus the variable out_res tail */
+	if (copy_from_user(ctx_data->icp_dev_acquire_info,
+		(void __user *)args->acquire_info, acquire_size)) {
+		CAM_ERR(CAM_ICP, "Failed in acquire: size = %d", acquire_size);
+		kfree(ctx_data->icp_dev_acquire_info);
+		ctx_data->icp_dev_acquire_info = NULL;
+		return -EFAULT;
+	}
+
+	CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x",
+		ctx_data->icp_dev_acquire_info->dev_type,
+		ctx_data->icp_dev_acquire_info->in_res.format,
+		ctx_data->icp_dev_acquire_info->in_res.width,
+		ctx_data->icp_dev_acquire_info->in_res.height,
+		ctx_data->icp_dev_acquire_info->in_res.fps,
+		ctx_data->icp_dev_acquire_info->num_out_res,
+		ctx_data->icp_dev_acquire_info->scratch_mem_size);
+
+	p_icp_out = ctx_data->icp_dev_acquire_info->out_res;
+	for (i = 0; i < icp_dev_acquire_info.num_out_res; i++)
+		CAM_DBG(CAM_ICP, "out[i] %x %x %x %x",
+			p_icp_out[i].format,
+			p_icp_out[i].width,
+			p_icp_out[i].height,
+			p_icp_out[i].fps);
+
+	return 0;
+}
+
+/*
+ * cam_icp_dev_type_to_name() - printable name for an ICP resource type
+ * @dev_type: one of the CAM_ICP_RES_TYPE_* values
+ *
+ * Return: static string; "Invalid dev type" for unknown values.
+ */
+static const char *cam_icp_dev_type_to_name(
+	uint32_t dev_type)
+{
+	if (dev_type == CAM_ICP_RES_TYPE_BPS)
+		return "BPS";
+
+	if (dev_type == CAM_ICP_RES_TYPE_IPE_RT)
+		return "IPE_RT";
+
+	if (dev_type == CAM_ICP_RES_TYPE_IPE)
+		return "IPE";
+
+	return "Invalid dev type";
+}
+
+/*
+ * cam_icp_mgr_acquire_hw() - acquire an ICP context for IPE/BPS use
+ * @hw_mgr_priv:     opaque pointer to struct cam_icp_hw_mgr
+ * @acquire_hw_args: opaque pointer to struct cam_hw_acquire_args
+ *
+ * Validates the user acquire request, reserves a free context slot,
+ * powers on ICP/IPE/BPS (first context also resumes ICP and pushes
+ * debug/UBWC config), creates the FW handle, sends the IO config, and
+ * finally marks the context ACQUIRED and starts device/ctx timers.
+ *
+ * Return: 0 on success; negative errno with full unwind via the error
+ * labels (reverse order of acquisition) on failure.
+ */
+static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	int rc = 0, bitmap_size = 0;
+	uint32_t ctx_id = 0;
+	uint64_t io_buf_addr;
+	size_t io_buf_size;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_acquire_args *args = acquire_hw_args;
+	struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
+	struct cam_cmd_mem_regions cmd_mem_region;
+
+	if ((!hw_mgr_priv) || (!acquire_hw_args)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", hw_mgr_priv,
+			acquire_hw_args);
+		return -EINVAL;
+	}
+
+	if (args->num_acq > 1) {
+		CAM_ERR(CAM_ICP, "number of resources are wrong: %u",
+			args->num_acq);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "ENTER");
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
+	if (ctx_id >= CAM_ICP_CTX_MAX) {
+		CAM_ERR(CAM_ICP, "No free ctx space in hw_mgr");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -ENOSPC;
+	}
+	ctx_data = &hw_mgr->ctx_data[ctx_id];
+	ctx_data->ctx_id = ctx_id;
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	rc = cam_icp_get_acquire_info(hw_mgr, args, ctx_data);
+	if (rc)
+		goto acquire_info_failed;
+
+	icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;
+
+	CAM_DBG(CAM_ICP, "acquire io buf handle %d",
+		icp_dev_acquire_info->io_config_cmd_handle);
+	rc = cam_mem_get_io_buf(
+		icp_dev_acquire_info->io_config_cmd_handle,
+		hw_mgr->iommu_hdl,
+		&io_buf_addr, &io_buf_size);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to get src buf info from io desc");
+		goto get_io_buf_failed;
+	}
+
+	CAM_DBG(CAM_ICP, "hdl: %d, addr: %pK, size: %zu",
+		icp_dev_acquire_info->io_config_cmd_handle,
+		(void *)io_buf_addr, io_buf_size);
+
+	/* First context: bring the ICP out of power collapse */
+	if (!hw_mgr->ctxt_cnt) {
+		rc = cam_icp_clk_info_init(hw_mgr, ctx_data);
+		if (rc)
+			goto get_io_buf_failed;
+
+		rc = cam_icp_mgr_icp_resume(hw_mgr);
+		if (rc)
+			goto get_io_buf_failed;
+
+		if (icp_hw_mgr.a5_debug_type)
+			hfi_set_debug_level(icp_hw_mgr.a5_debug_type,
+				icp_hw_mgr.a5_dbg_lvl);
+
+		hfi_set_fw_dump_level(icp_hw_mgr.a5_fw_dump_lvl);
+
+		rc = cam_icp_send_ubwc_cfg(hw_mgr);
+		if (rc)
+			goto ubwc_cfg_failed;
+	}
+
+
+	rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
+	if (rc)
+		goto ipe_bps_resume_failed;
+
+	rc = cam_icp_mgr_send_ping(ctx_data);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ping ack not received");
+		goto send_ping_failed;
+	}
+	CAM_DBG(CAM_ICP, "ping ack received");
+
+	rc = cam_icp_mgr_create_handle(icp_dev_acquire_info->dev_type,
+		ctx_data);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "create handle failed");
+		goto create_handle_failed;
+	}
+
+	cmd_mem_region.num_regions = 1;
+	cmd_mem_region.map_info_array[0].mem_handle =
+		icp_dev_acquire_info->io_config_cmd_handle;
+	cmd_mem_region.map_info_array[0].offset = 0;
+	cmd_mem_region.map_info_array[0].size =
+		icp_dev_acquire_info->io_config_cmd_size;
+	cmd_mem_region.map_info_array[0].flags = 0;
+
+	rc = cam_icp_process_stream_settings(ctx_data,
+		&cmd_mem_region, true);
+	if (rc) {
+		CAM_ERR(CAM_ICP,
+			"sending config io mapping failed rc %d", rc);
+		goto send_map_info_failed;
+	}
+
+	rc = cam_icp_mgr_send_config_io(ctx_data, io_buf_addr);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "IO Config command failed %d", rc);
+		goto ioconfig_failed;
+	}
+
+	rc = cam_icp_process_stream_settings(ctx_data,
+		&cmd_mem_region, false);
+	if (rc) {
+		CAM_ERR(CAM_ICP,
+			"sending config io unmapping failed %d", rc);
+		goto send_map_info_failed;
+	}
+
+	ctx_data->context_priv = args->context_data;
+	args->ctxt_to_hw_map = ctx_data;
+
+	bitmap_size = BITS_TO_LONGS(CAM_FRAME_CMD_MAX) * sizeof(long);
+	ctx_data->hfi_frame_process.bitmap =
+			kzalloc(bitmap_size, GFP_KERNEL);
+	if (!ctx_data->hfi_frame_process.bitmap) {
+		/* Fix: propagate allocation failure (was returning rc == 0) */
+		rc = -ENOMEM;
+		goto ioconfig_failed;
+	}
+
+	ctx_data->hfi_frame_process.bits = bitmap_size * BITS_PER_BYTE;
+	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+	icp_dev_acquire_info->scratch_mem_size = ctx_data->scratch_mem_size;
+
+	if (copy_to_user((void __user *)args->acquire_info,
+		icp_dev_acquire_info,
+		sizeof(struct cam_icp_acquire_dev_info))) {
+		/* Fix: propagate copy failure (was returning rc == 0) */
+		rc = -EFAULT;
+		goto copy_to_user_failed;
+	}
+
+	cam_icp_ctx_clk_info_init(ctx_data);
+	ctx_data->state = CAM_ICP_CTX_STATE_ACQUIRED;
+	mutex_unlock(&ctx_data->ctx_mutex);
+	CAM_DBG(CAM_ICP, "scratch size = %x fw_handle = %x",
+		(unsigned int)icp_dev_acquire_info->scratch_mem_size,
+		(unsigned int)ctx_data->fw_handle);
+	/* Start device timer*/
+	if (((hw_mgr->bps_ctxt_cnt == 1) || (hw_mgr->ipe_ctxt_cnt == 1)))
+		cam_icp_device_timer_start(hw_mgr);
+	/* Start context timer*/
+	cam_icp_ctx_timer_start(ctx_data);
+	hw_mgr->ctxt_cnt++;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	CAM_DBG(CAM_ICP, "Acquire Done for ctx_id %u dev name %s dev type %d",
+		ctx_data->ctx_id, cam_icp_dev_type_to_name(
+		icp_dev_acquire_info->dev_type),
+		icp_dev_acquire_info->dev_type);
+
+	return 0;
+
+copy_to_user_failed:
+	kfree(ctx_data->hfi_frame_process.bitmap);
+	ctx_data->hfi_frame_process.bitmap = NULL;
+ioconfig_failed:
+	cam_icp_process_stream_settings(ctx_data,
+		&cmd_mem_region, false);
+send_map_info_failed:
+	cam_icp_mgr_destroy_handle(ctx_data);
+create_handle_failed:
+send_ping_failed:
+	cam_icp_mgr_ipe_bps_power_collapse(hw_mgr, ctx_data, 0);
+ipe_bps_resume_failed:
+ubwc_cfg_failed:
+	if (!hw_mgr->ctxt_cnt)
+		cam_icp_mgr_icp_power_collapse(hw_mgr);
+get_io_buf_failed:
+	kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+acquire_info_failed:
+	cam_icp_mgr_put_ctx(ctx_data);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_get_hw_caps() - service CAM_QUERY_CAP for the ICP subdev
+ * @hw_mgr_priv:  opaque pointer to struct cam_icp_hw_mgr
+ * @hw_caps_args: opaque pointer to struct cam_query_cap_cmd
+ *
+ * Copies the query command from user space, fills in FW/API versions via
+ * hfi_get_hw_caps() and the iommu handles, and copies the result back.
+ *
+ * NOTE(review): user data lands in the global icp_hw_mgr.icp_caps; the
+ * hw_mgr_mutex held across the sequence serializes concurrent callers.
+ *
+ * Return: 0 on success, -EINVAL/-EFAULT or hfi error on failure.
+ */
+static int cam_icp_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
+{
+	int rc = 0;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query_cap = hw_caps_args;
+
+	if ((!hw_mgr_priv) || (!hw_caps_args)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK",
+			hw_mgr_priv, hw_caps_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (copy_from_user(&icp_hw_mgr.icp_caps,
+		u64_to_user_ptr(query_cap->caps_handle),
+		sizeof(struct cam_icp_query_cap_cmd))) {
+		CAM_ERR(CAM_ICP, "copy_from_user failed");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	rc = hfi_get_hw_caps(&icp_hw_mgr.icp_caps);
+	if (rc)
+		goto end;
+
+	icp_hw_mgr.icp_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
+	icp_hw_mgr.icp_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
+
+	if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
+		&icp_hw_mgr.icp_caps, sizeof(struct cam_icp_query_cap_cmd))) {
+		CAM_ERR(CAM_ICP, "copy_to_user failed");
+		rc = -EFAULT;
+	}
+end:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_alloc_devs() - allocate device-interface arrays from DT
+ * @of_node: ICP hw mgr device-tree node carrying num-a5/num-ipe/num-bps
+ *
+ * Allocates the per-device-type arrays of cam_hw_intf pointers and
+ * latches the power-collapse DT flags. On failure everything allocated
+ * so far is freed via the error labels.
+ *
+ * Return: 0 on success, -ENOMEM or the of_property_read error.
+ */
+static int cam_icp_mgr_alloc_devs(struct device_node *of_node)
+{
+	int rc;
+	uint32_t num_dev;
+
+	rc = of_property_read_u32(of_node, "num-a5", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "getting num of a5 failed");
+		goto num_a5_failed;
+	}
+
+	/*
+	 * Fix: use kcalloc like the IPE/BPS allocations below — consistent
+	 * style and overflow-checked multiplication.
+	 */
+	icp_hw_mgr.devices[CAM_ICP_DEV_A5] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_A5]) {
+		rc = -ENOMEM;
+		goto num_a5_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-ipe", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "getting number of ipe dev nodes failed");
+		goto num_ipe_failed;
+	}
+
+	/* Only one IPE array slot is needed when IPE1 is fused off */
+	if (!icp_hw_mgr.ipe1_enable)
+		num_dev = 1;
+
+	icp_hw_mgr.devices[CAM_ICP_DEV_IPE] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_IPE]) {
+		rc = -ENOMEM;
+		goto num_ipe_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-bps", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "read num bps devices failed");
+		goto num_bps_failed;
+	}
+	icp_hw_mgr.devices[CAM_ICP_DEV_BPS] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_BPS]) {
+		rc = -ENOMEM;
+		goto num_bps_failed;
+	}
+
+	icp_hw_mgr.ipe_bps_pc_flag = of_property_read_bool(of_node,
+		"ipe_bps_pc_en");
+
+	icp_hw_mgr.icp_pc_flag = of_property_read_bool(of_node,
+		"icp_pc_en");
+
+	return 0;
+num_bps_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_IPE]);
+num_ipe_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_A5]);
+num_a5_failed:
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_init_devs() - bind child A5/IPE/BPS devices from DT
+ * @of_node: ICP hw mgr device-tree node with "compat-hw-name" strings
+ *
+ * Resolves each compat-hw-name to a platform device and records its
+ * cam_hw_intf in the per-type arrays allocated by
+ * cam_icp_mgr_alloc_devs(), then caches the well-known interfaces.
+ *
+ * Return: 0 on success, negative errno with the arrays freed on failure.
+ */
+static int cam_icp_mgr_init_devs(struct device_node *of_node)
+{
+	int rc = 0;
+	int count, i;
+	const char *name = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *child_dev_intf = NULL;
+
+	rc = cam_icp_mgr_alloc_devs(of_node);
+	if (rc)
+		return rc;
+
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	/*
+	 * Fix: of_property_count_strings() returns a negative errno on
+	 * error, which the old "!count" test let through (the loop below
+	 * would then silently not run and NULL interfaces would be
+	 * dereferenced). Treat any non-positive count as failure.
+	 */
+	if (count <= 0) {
+		CAM_ERR(CAM_ICP, "no compat hw found in dev tree, cnt = %d",
+			count);
+		rc = -EINVAL;
+		goto compat_hw_name_failed;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "compat-hw-name",
+			i, &name);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "getting dev object name failed");
+			goto compat_hw_name_failed;
+		}
+
+		child_node = of_find_node_by_name(NULL, name);
+		if (!child_node) {
+			CAM_ERR(CAM_ICP, "Cannot find node in dtsi %s", name);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+
+		child_pdev = of_find_device_by_node(child_node);
+		if (!child_pdev) {
+			CAM_ERR(CAM_ICP, "failed to find device on bus %s",
+				child_node->name);
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+			child_pdev);
+		if (!child_dev_intf) {
+			CAM_ERR(CAM_ICP, "no child device");
+			of_node_put(child_node);
+			if (!icp_hw_mgr.ipe1_enable)
+				continue;
+			goto compat_hw_name_failed;
+		}
+		icp_hw_mgr.devices[child_dev_intf->hw_type]
+			[child_dev_intf->hw_idx] = child_dev_intf;
+
+		if (!child_dev_intf->hw_ops.process_cmd) {
+			/*
+			 * Fix: drop the node reference before bailing out
+			 * (was leaking it) and return a real error (rc was
+			 * still 0 from the string read above).
+			 */
+			of_node_put(child_node);
+			rc = -EINVAL;
+			goto compat_hw_name_failed;
+		}
+
+		of_node_put(child_node);
+	}
+
+	icp_hw_mgr.a5_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_A5][0];
+	icp_hw_mgr.bps_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_BPS][0];
+	icp_hw_mgr.ipe0_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_IPE][0];
+	if (icp_hw_mgr.ipe1_enable)
+		icp_hw_mgr.ipe1_dev_intf =
+			icp_hw_mgr.devices[CAM_ICP_DEV_IPE][1];
+
+	return 0;
+compat_hw_name_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_IPE]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_A5]);
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_create_wq() - create the ICP command/message/timer workqs
+ *
+ * Creates the three CRM workqueues, allocates per-task payload arrays,
+ * registers the debugfs entries and wires each task's payload pointer.
+ *
+ * Return: 0 on success; negative errno with everything torn down via
+ * the error labels on failure.
+ */
+static int cam_icp_mgr_create_wq(void)
+{
+	int rc;
+	int i;
+
+	rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
+		&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ,
+		0);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to create a command worker");
+		goto cmd_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
+		&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to create a message worker");
+		goto msg_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("icp_timer_queue", ICP_WORKQ_NUM_TASK,
+		&icp_hw_mgr.timer_work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to create a timer worker");
+		goto timer_work_failed;
+	}
+
+	/*
+	 * Fix: the three allocation-failure paths below used to goto with
+	 * rc still 0 (from the last successful workq_create), so an OOM
+	 * here was reported to the caller as success. Set -ENOMEM.
+	 */
+	icp_hw_mgr.cmd_work_data = (struct hfi_cmd_work_data *)
+		kzalloc(sizeof(struct hfi_cmd_work_data) * ICP_WORKQ_NUM_TASK,
+		GFP_KERNEL);
+	if (!icp_hw_mgr.cmd_work_data) {
+		rc = -ENOMEM;
+		goto cmd_work_data_failed;
+	}
+
+	icp_hw_mgr.msg_work_data = (struct hfi_msg_work_data *)
+		kzalloc(sizeof(struct hfi_msg_work_data) * ICP_WORKQ_NUM_TASK,
+		GFP_KERNEL);
+	if (!icp_hw_mgr.msg_work_data) {
+		rc = -ENOMEM;
+		goto msg_work_data_failed;
+	}
+
+	icp_hw_mgr.timer_work_data = (struct hfi_msg_work_data *)
+		kzalloc(sizeof(struct hfi_msg_work_data) * ICP_WORKQ_NUM_TASK,
+		GFP_KERNEL);
+	if (!icp_hw_mgr.timer_work_data) {
+		rc = -ENOMEM;
+		goto timer_work_data_failed;
+	}
+
+	rc = cam_icp_hw_mgr_create_debugfs_entry();
+	if (rc)
+		goto debugfs_create_failed;
+
+	/* Point every pre-allocated task at its payload slot */
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.msg_work->task.pool[i].payload =
+				&icp_hw_mgr.msg_work_data[i];
+
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.cmd_work->task.pool[i].payload =
+				&icp_hw_mgr.cmd_work_data[i];
+
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.timer_work->task.pool[i].payload =
+				&icp_hw_mgr.timer_work_data[i];
+	return 0;
+
+debugfs_create_failed:
+	kfree(icp_hw_mgr.timer_work_data);
+timer_work_data_failed:
+	kfree(icp_hw_mgr.msg_work_data);
+msg_work_data_failed:
+	kfree(icp_hw_mgr.cmd_work_data);
+cmd_work_data_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.timer_work);
+timer_work_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.msg_work);
+msg_work_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.cmd_work);
+cmd_work_failed:
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_cmd() - miscellaneous hw-mgr command dispatcher
+ * @hw_mgr_priv: opaque pointer to struct cam_icp_hw_mgr
+ * @cmd_args:    opaque pointer to struct cam_hw_cmd_args
+ *
+ * Currently only CAM_HW_MGR_CMD_DUMP_PF_INFO (page-fault buffer dump)
+ * is handled. Unknown commands are logged but still return 0 —
+ * presumably tolerated by callers; confirm before tightening.
+ *
+ * Return: 0, or -EINVAL on NULL arguments.
+ */
+static int cam_icp_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_ICP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+		cam_icp_mgr_print_io_bufs(
+			hw_cmd_args->u.pf_args.pf_data.packet,
+			hw_mgr->iommu_hdl,
+			hw_mgr->iommu_sec_hdl,
+			hw_cmd_args->u.pf_args.buf_info,
+			hw_cmd_args->u.pf_args.mem_found);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid cmd");
+	}
+
+	return rc;
+}
+
+/*
+ * cam_icp_hw_mgr_init() - one-time initialization of the ICP hw manager
+ * @of_node:    ICP device-tree node
+ * @hw_mgr_hdl: out param; actually a struct cam_hw_mgr_intf * to fill
+ * @iommu_hdl:  optional out param for the non-secure iommu handle
+ *
+ * Wires the hw_mgr_intf callbacks, probes IPE0/IPE1/BPS presence via
+ * CPAS capability bits, initializes child devices, SMMU handles and
+ * work queues.
+ *
+ * Return: 0 on success; negative errno with full unwind on failure.
+ */
+int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+	int *iommu_hdl)
+{
+	int i, rc = 0;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	struct cam_cpas_query_cap query;
+	uint32_t cam_caps;
+
+	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
+	if (!of_node || !hw_mgr_intf) {
+		CAM_ERR(CAM_ICP, "Invalid args of_node %pK hw_mgr %pK",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	hw_mgr_intf->hw_mgr_priv = &icp_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_icp_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_icp_mgr_acquire_hw;
+	hw_mgr_intf->hw_release = cam_icp_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_icp_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config_stream_settings =
+		cam_icp_mgr_config_stream_settings;
+	hw_mgr_intf->hw_config = cam_icp_mgr_config_hw;
+	hw_mgr_intf->hw_open = cam_icp_mgr_hw_open_u;
+	hw_mgr_intf->hw_close = cam_icp_mgr_hw_close_u;
+	hw_mgr_intf->hw_flush = cam_icp_mgr_hw_flush;
+	hw_mgr_intf->hw_cmd = cam_icp_mgr_cmd;
+
+	icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
+	mutex_init(&icp_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&icp_hw_mgr.hw_mgr_lock);
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		mutex_init(&icp_hw_mgr.ctx_data[i].ctx_mutex);
+
+	/* CPAS fuse bits decide which of IPE0/IPE1/BPS exist on this SoC */
+	cam_cpas_get_hw_info(&query.camera_family,
+		&query.camera_version, &query.cpas_version, &cam_caps);
+	if (cam_caps & CPAS_IPE0_BIT)
+		icp_hw_mgr.ipe0_enable = true;
+	if (cam_caps & CPAS_IPE1_BIT)
+		icp_hw_mgr.ipe1_enable = true;
+	if (cam_caps & CPAS_BPS_BIT)
+		icp_hw_mgr.bps_enable = true;
+
+	rc = cam_icp_mgr_init_devs(of_node);
+	if (rc)
+		goto dev_init_failed;
+
+	rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "get mmu handle failed: %d", rc);
+		goto icp_get_hdl_failed;
+	}
+
+	rc = cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "icp attach failed: %d", rc);
+		goto icp_attach_failed;
+	}
+
+	rc = cam_smmu_get_handle("cam-secure", &icp_hw_mgr.iommu_sec_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "get secure mmu handle failed: %d", rc);
+		goto secure_hdl_failed;
+	}
+
+	rc = cam_icp_mgr_create_wq();
+	if (rc)
+		goto icp_wq_create_failed;
+
+	if (iommu_hdl)
+		*iommu_hdl = icp_hw_mgr.iommu_hdl;
+
+	init_completion(&icp_hw_mgr.a5_complete);
+	return rc;
+
+icp_wq_create_failed:
+	cam_smmu_destroy_handle(icp_hw_mgr.iommu_sec_hdl);
+	icp_hw_mgr.iommu_sec_hdl = -1;
+secure_hdl_failed:
+	cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
+icp_attach_failed:
+	cam_smmu_destroy_handle(icp_hw_mgr.iommu_hdl);
+	icp_hw_mgr.iommu_hdl = -1;
+icp_get_hdl_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_IPE]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_A5]);
+dev_init_failed:
+	mutex_destroy(&icp_hw_mgr.hw_mgr_mutex);
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		mutex_destroy(&icp_hw_mgr.ctx_data[i].ctx_mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
new file mode 100644
index 000000000000..234643e3d105
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -0,0 +1,369 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_ICP_HW_MGR_H
+#define CAM_ICP_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_icp.h>
+#include "cam_icp_hw_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_a5_hw_intf.h"
+#include "hfi_session_defs.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+#include "cam_soc_util.h"
+#include "cam_req_mgr_timer.h"
+
+#define CAM_ICP_ROLE_PARENT 1
+#define CAM_ICP_ROLE_CHILD 2
+
+#define CAM_FRAME_CMD_MAX 20
+
+#define CAM_MAX_OUT_RES 6
+#define CAM_MAX_IN_RES 8
+
+#define ICP_WORKQ_NUM_TASK 100
+#define ICP_WORKQ_TASK_CMD_TYPE 1
+#define ICP_WORKQ_TASK_MSG_TYPE 2
+
+#define ICP_PACKET_SIZE 0
+#define ICP_PACKET_TYPE 1
+#define ICP_PACKET_OPCODE 2
+#define ICP_MAX_OUTPUT_SUPPORTED 6
+
+#define ICP_FRAME_PROCESS_SUCCESS 0
+#define ICP_FRAME_PROCESS_FAILURE 1
+#define ICP_MSG_BUF_SIZE 256
+#define ICP_DBG_BUF_SIZE 102400
+
+#define ICP_CLK_HW_IPE 0x0
+#define ICP_CLK_HW_BPS 0x1
+#define ICP_CLK_HW_MAX 0x2
+
+#define ICP_OVER_CLK_THRESHOLD 5
+
+#define CPAS_IPE0_BIT 0x1000
+#define CPAS_IPE1_BIT 0x2000
+#define CPAS_BPS_BIT 0x400
+
+#define ICP_PWR_CLP_BPS 0x00000001
+#define ICP_PWR_CLP_IPE0 0x00010000
+#define ICP_PWR_CLP_IPE1 0x00020000
+
+#define CAM_ICP_CTX_STATE_FREE 0x0
+#define CAM_ICP_CTX_STATE_IN_USE 0x1
+#define CAM_ICP_CTX_STATE_ACQUIRED 0x2
+#define CAM_ICP_CTX_STATE_RELEASE 0x3
+
+#define CAM_ICP_CTX_MAX_CMD_BUFFERS 0x2
+
+/**
+ * struct icp_hfi_mem_info
+ * @qtbl: Memory info of queue table
+ * @cmd_q: Memory info of command queue
+ * @msg_q: Memory info of message queue
+ * @dbg_q: Memory info of debug queue
+ * @sec_heap: Memory info of secondary heap
+ * @fw_buf: Memory info of firmware
+ * @qdss_buf: Memory info of qdss
+ * @sfr_buf: Memory info for sfr buffer
+ * @shmem: SMMU shared-memory region info
+ */
+struct icp_hfi_mem_info {
+	struct cam_mem_mgr_memory_desc qtbl;
+	struct cam_mem_mgr_memory_desc cmd_q;
+	struct cam_mem_mgr_memory_desc msg_q;
+	struct cam_mem_mgr_memory_desc dbg_q;
+	struct cam_mem_mgr_memory_desc sec_heap;
+	struct cam_mem_mgr_memory_desc fw_buf;
+	struct cam_mem_mgr_memory_desc qdss_buf;
+	struct cam_mem_mgr_memory_desc sfr_buf;
+	struct cam_smmu_region_info shmem;
+};
+
+/**
+ * struct hfi_cmd_work_data
+ * @type: Task type
+ * @data: Pointer to command data
+ * @request_id: Request id
+ */
+struct hfi_cmd_work_data {
+ uint32_t type;
+ void *data;
+ int32_t request_id;
+};
+
+/**
+ * struct hfi_msg_work_data
+ * @type: Task type
+ * @data: Pointer to message data
+ * @irq_status: IRQ status
+ */
+struct hfi_msg_work_data {
+ uint32_t type;
+ void *data;
+ uint32_t irq_status;
+};
+
+/**
+ * struct clk_work_data
+ * @type: Task type
+ * @data: Pointer to clock info
+ */
+struct clk_work_data {
+ uint32_t type;
+ void *data;
+};
+
+/**
+ * struct icp_frame_info
+ * @request_id: request id
+ * @io_config: the address of io config
+ * @hfi_cfg_io_cmd: command struct to be sent to hfi
+ */
+struct icp_frame_info {
+ uint64_t request_id;
+ uint64_t io_config;
+ struct hfi_cmd_ipebps_async hfi_cfg_io_cmd;
+};
+
+
+/**
+ * struct hfi_frame_process_info
+ * @hfi_frame_cmd: Frame process command info
+ * @bitmap: Bitmap for hfi_frame_cmd
+ * @bits: Used in hfi_frame_cmd bitmap
+ * @lock: Lock for hfi_frame_cmd
+ * @request_id: Request id list
+ * @num_out_resources: Number of out syncs
+ * @out_resource: Out sync info
+ * @in_resource: In sync info
+ * @in_free_resource: In sync resources to be freed
+ * @fw_process_flag: Frame process flag
+ * @clk_info: Clock information for a request
+ * @frame_info: information needed to process request
+ */
+struct hfi_frame_process_info {
+ struct hfi_cmd_ipebps_async hfi_frame_cmd[CAM_FRAME_CMD_MAX];
+ void *bitmap;
+ size_t bits;
+ struct mutex lock;
+ uint64_t request_id[CAM_FRAME_CMD_MAX];
+ uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
+ uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
+ uint32_t in_resource[CAM_FRAME_CMD_MAX];
+ uint32_t in_free_resource[CAM_FRAME_CMD_MAX];
+ uint32_t fw_process_flag[CAM_FRAME_CMD_MAX];
+ struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX];
+ struct icp_frame_info frame_info[CAM_FRAME_CMD_MAX];
+};
+
+/**
+ * struct cam_ctx_clk_info
+ * @curr_fc: Context latest request frame cycles
+ * @rt_flag: Flag to indicate real time request
+ * @base_clk: Base clock to process the request
+ * @reserved: Reserved field
+ * @uncompressed_bw: Current bandwidth voting
+ * @compressed_bw: Current compressed bandwidth voting
+ * @clk_rate: Supported clock rates for the context
+ */
+struct cam_ctx_clk_info {
+ uint32_t curr_fc;
+ uint32_t rt_flag;
+ uint32_t base_clk;
+ uint32_t reserved;
+ uint64_t uncompressed_bw;
+ uint64_t compressed_bw;
+ int32_t clk_rate[CAM_MAX_VOTE];
+};
+/**
+ * struct cam_icp_hw_ctx_data
+ * @context_priv: Context private data
+ * @ctx_mutex: Mutex for context
+ * @fw_handle: Firmware handle
+ * @scratch_mem_size: Scratch memory size
+ * @acquire_dev_cmd: Acquire command
+ * @icp_dev_acquire_info: Acquire device info
+ * @ctxt_event_cb: Context callback function
+ * @state: context state
+ * @role: Role of a context in case of chaining
+ * @chain_ctx: Peer context
+ * @hfi_frame_process: Frame process command
+ * @wait_complete: Completion info
+ * @temp_payload: Payload for destroy handle data
+ * @ctx_id: Context Id
+ * @clk_info: Current clock info of a context
+ * @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
+ * @icp_dev_io_info: io config resource
+ */
+struct cam_icp_hw_ctx_data {
+ void *context_priv;
+ struct mutex ctx_mutex;
+ uint32_t fw_handle;
+ uint32_t scratch_mem_size;
+ struct cam_acquire_dev_cmd acquire_dev_cmd;
+ struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
+ cam_hw_event_cb_func ctxt_event_cb;
+ uint32_t state;
+ uint32_t role;
+ struct cam_icp_hw_ctx_data *chain_ctx;
+ struct hfi_frame_process_info hfi_frame_process;
+ struct completion wait_complete;
+ struct ipe_bps_destroy temp_payload;
+ uint32_t ctx_id;
+ struct cam_ctx_clk_info clk_info;
+ struct cam_req_mgr_timer *watch_dog;
+ uint32_t watch_dog_reset_counter;
+ struct cam_icp_acquire_dev_info icp_dev_io_info;
+};
+
+/**
+ * struct icp_cmd_generic_blob
+ * @ctx: Current context info
+ * @frame_info_idx: Index used for frame process info
+ * @io_buf_addr: pointer to io buffer address
+ */
+struct icp_cmd_generic_blob {
+ struct cam_icp_hw_ctx_data *ctx;
+ uint32_t frame_info_idx;
+ uint64_t *io_buf_addr;
+};
+
+/**
+ * struct cam_icp_clk_info
+ * @base_clk: Base clock to process request
+ * @curr_clk: Current clock of hardware
+ * @threshold: Threshold for overclk count
+ * @over_clked: Over clock count
+ * @uncompressed_bw: Current bandwidth voting
+ * @compressed_bw: Current compressed bandwidth voting
+ * @hw_type: IPE/BPS device type
+ * @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
+ */
+struct cam_icp_clk_info {
+ uint32_t base_clk;
+ uint32_t curr_clk;
+ uint32_t threshold;
+ uint32_t over_clked;
+ uint64_t uncompressed_bw;
+ uint64_t compressed_bw;
+ uint32_t hw_type;
+ struct cam_req_mgr_timer *watch_dog;
+ uint32_t watch_dog_reset_counter;
+};
+
+/**
+ * struct cam_icp_hw_mgr
+ * @hw_mgr_mutex: Mutex for ICP hardware manager
+ * @hw_mgr_lock: Spinlock for ICP hardware manager
+ * @devices: Devices of ICP hardware manager
+ * @ctx_data: Context data
+ * @icp_caps: ICP capabilities
+ * @fw_download: Firmware download state
+ * @iommu_hdl: Non secure IOMMU handle
+ * @iommu_sec_hdl: Secure IOMMU handle
+ * @hfi_mem: Memory for hfi
+ * @cmd_work: Work queue for hfi commands
+ * @msg_work: Work queue for hfi messages
+ * @timer_work: Work queue for timer watchdog
+ * @msg_buf: Buffer for message data from firmware
+ * @dbg_buf: Buffer for debug data from firmware
+ * @a5_complete: Completion info
+ * @cmd_work_data: Pointer to command work queue task
+ * @msg_work_data: Pointer to message work queue task
+ * @timer_work_data: Pointer to timer work queue task
+ * @ctxt_cnt: Active context count
+ * @ipe_ctxt_cnt: IPE Active context count
+ * @bps_ctxt_cnt: BPS Active context count
+ * @dentry: Debugfs entry
+ * @a5_debug: A5 debug flag
+ * @icp_pc_flag: Flag to enable/disable power collapse
+ * @ipe_bps_pc_flag: Flag to enable/disable
+ * power collapse for ipe & bps
+ * @icp_debug_clk: Set clock based on debug value
+ * @icp_default_clk: Set this clock if user doesn't supply
+ * @clk_info: Clock info of hardware
+ * @secure_mode: Flag to enable/disable secure camera
+ * @a5_jtag_debug: entry to enable A5 JTAG debugging
+ * @a5_debug_type : entry to enable FW debug message/qdss
+ * @a5_dbg_lvl : debug level set to FW.
+ * @a5_fw_dump_lvl : level set for dumping the FW data
+ * @ipe0_enable: Flag for IPE0
+ * @ipe1_enable: Flag for IPE1
+ * @bps_enable: Flag for BPS
+ * @a5_dev_intf : Device interface for A5
+ * @ipe0_dev_intf: Device interface for IPE0
+ * @ipe1_dev_intf: Device interface for IPE1
+ * @bps_dev_intf: Device interface for BPS
+ * @ipe_clk_state: IPE clock state flag
+ * @bps_clk_state: BPS clock state flag
+ * @recovery: Flag to validate if in previous session FW
+ * reported a fatal error or wdt. If set FW is
+ * re-downloaded for new camera session.
+ */
+struct cam_icp_hw_mgr {
+ struct mutex hw_mgr_mutex;
+ spinlock_t hw_mgr_lock;
+
+ struct cam_hw_intf **devices[CAM_ICP_DEV_MAX];
+ struct cam_icp_hw_ctx_data ctx_data[CAM_ICP_CTX_MAX];
+ struct cam_icp_query_cap_cmd icp_caps;
+
+ bool fw_download;
+ int32_t iommu_hdl;
+ int32_t iommu_sec_hdl;
+ struct icp_hfi_mem_info hfi_mem;
+ struct cam_req_mgr_core_workq *cmd_work;
+ struct cam_req_mgr_core_workq *msg_work;
+ struct cam_req_mgr_core_workq *timer_work;
+ uint32_t msg_buf[ICP_MSG_BUF_SIZE];
+ uint32_t dbg_buf[ICP_DBG_BUF_SIZE];
+ struct completion a5_complete;
+ struct hfi_cmd_work_data *cmd_work_data;
+ struct hfi_msg_work_data *msg_work_data;
+ struct hfi_msg_work_data *timer_work_data;
+ uint32_t ctxt_cnt;
+ uint32_t ipe_ctxt_cnt;
+ uint32_t bps_ctxt_cnt;
+ struct dentry *dentry;
+ bool a5_debug;
+ bool icp_pc_flag;
+ bool ipe_bps_pc_flag;
+ uint64_t icp_debug_clk;
+ uint64_t icp_default_clk;
+ struct cam_icp_clk_info clk_info[ICP_CLK_HW_MAX];
+ bool secure_mode;
+ bool a5_jtag_debug;
+ u64 a5_debug_type;
+ u64 a5_dbg_lvl;
+ u64 a5_fw_dump_lvl;
+ bool ipe0_enable;
+ bool ipe1_enable;
+ bool bps_enable;
+ struct cam_hw_intf *a5_dev_intf;
+ struct cam_hw_intf *ipe0_dev_intf;
+ struct cam_hw_intf *ipe1_dev_intf;
+ struct cam_hw_intf *bps_dev_intf;
+ bool ipe_clk_state;
+ bool bps_clk_state;
+ atomic_t recovery;
+};
+
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
+static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args);
+static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr);
+static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr);
+#endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
new file mode 100644
index 000000000000..17a79d8d3f30
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_A5_HW_INTF_H
+#define CAM_A5_HW_INTF_H
+
+#include <linux/timer.h>
+#include <uapi/media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+enum cam_icp_a5_cmd_type {
+ CAM_ICP_A5_CMD_FW_DOWNLOAD,
+ CAM_ICP_A5_CMD_POWER_COLLAPSE,
+ CAM_ICP_A5_CMD_POWER_RESUME,
+ CAM_ICP_A5_CMD_SET_FW_BUF,
+ CAM_ICP_A5_CMD_ACQUIRE,
+ CAM_ICP_A5_SET_IRQ_CB,
+ CAM_ICP_A5_TEST_IRQ,
+ CAM_ICP_A5_SEND_INIT,
+ CAM_ICP_A5_CMD_VOTE_CPAS,
+ CAM_ICP_A5_CMD_CPAS_START,
+ CAM_ICP_A5_CMD_CPAS_STOP,
+ CAM_ICP_A5_CMD_UBWC_CFG,
+ CAM_ICP_A5_CMD_PC_PREP,
+ CAM_ICP_A5_CMD_MAX,
+};
+
+struct cam_icp_a5_set_fw_buf_info {
+ uint32_t iova;
+ uint64_t kva;
+ uint64_t len;
+};
+
+/**
+ * struct cam_icp_a5_query_cap - ICP query device capability payload
+ * @fw_version: firmware version info
+ * @api_version: api version info
+ * @num_ipe: number of ipes
+ * @num_bps: number of bps
+ * @num_dev: number of device capabilities in dev_caps
+ * @reserved: reserved
+ * @dev_ver: returned device capability array
+ * This structure is returned as the payload of the CAM_QUERY_CAP IOCTL
+ */
+struct cam_icp_a5_query_cap {
+ struct cam_icp_ver fw_version;
+ struct cam_icp_ver api_version;
+ uint32_t num_ipe;
+ uint32_t num_bps;
+ uint32_t num_dev;
+ uint32_t reserved;
+ struct cam_icp_dev_ver dev_ver[CAM_ICP_DEV_TYPE_MAX];
+};
+
+struct cam_icp_a5_acquire_dev {
+ uint32_t ctx_id;
+ struct cam_icp_acquire_dev_info icp_acquire_info;
+ struct cam_icp_res_info icp_out_acquire_info[2];
+ uint32_t fw_handle;
+};
+
+struct cam_icp_a5_set_irq_cb {
+ int32_t (*icp_hw_mgr_cb)(uint32_t irq_status, void *data);
+ void *data;
+};
+
+struct cam_icp_a5_test_irq {
+ uint32_t test_irq;
+};
+#endif /* CAM_A5_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
new file mode 100644
index 000000000000..0f76a057c687
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_BPS_HW_INTF_H
+#define CAM_BPS_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+/* BPS register */
+#define BPS_TOP_RST_CMD 0x1008
+#define BPS_CDM_RST_CMD 0x10
+#define BPS_CDM_IRQ_STATUS 0x44
+#define BPS_TOP_IRQ_STATUS 0x100C
+
+/* BPS CDM/TOP status register */
+#define BPS_RST_DONE_IRQ_STATUS_BIT 0x1
+
+enum cam_icp_bps_cmd_type {
+ CAM_ICP_BPS_CMD_FW_DOWNLOAD,
+ CAM_ICP_BPS_CMD_POWER_COLLAPSE,
+ CAM_ICP_BPS_CMD_POWER_RESUME,
+ CAM_ICP_BPS_CMD_SET_FW_BUF,
+ CAM_ICP_BPS_CMD_VOTE_CPAS,
+ CAM_ICP_BPS_CMD_CPAS_START,
+ CAM_ICP_BPS_CMD_CPAS_STOP,
+ CAM_ICP_BPS_CMD_UPDATE_CLK,
+ CAM_ICP_BPS_CMD_DISABLE_CLK,
+ CAM_ICP_BPS_CMD_RESET,
+ CAM_ICP_BPS_CMD_MAX,
+};
+
+#endif /* CAM_BPS_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
new file mode 100644
index 000000000000..00726c12e8aa
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_ICP_HW_INTF_H
+#define CAM_ICP_HW_INTF_H
+
+#define CAM_ICP_CMD_BUF_MAX_SIZE 128
+#define CAM_ICP_MSG_BUF_MAX_SIZE CAM_ICP_CMD_BUF_MAX_SIZE
+
+enum cam_a5_hw_type {
+ CAM_ICP_DEV_A5,
+ CAM_ICP_DEV_IPE,
+ CAM_ICP_DEV_BPS,
+ CAM_ICP_DEV_MAX,
+};
+
+/**
+ * struct cam_a5_clk_update_cmd - Payload for hw manager command
+ *
+ * @curr_clk_rate: clk rate to HW
+ * @ipe_bps_pc_enable: power collapse enable flag
+ */
+struct cam_a5_clk_update_cmd {
+ uint32_t curr_clk_rate;
+ bool ipe_bps_pc_enable;
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
new file mode 100644
index 000000000000..d1e3b9a9ae0d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_IPE_HW_INTF_H
+#define CAM_IPE_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+/* IPE registers */
+#define IPE_TOP_RST_CMD 0x1008
+#define IPE_CDM_RST_CMD 0x10
+#define IPE_CDM_IRQ_STATUS 0x44
+#define IPE_TOP_IRQ_STATUS 0x100C
+
+/* IPE CDM/TOP status register */
+#define IPE_RST_DONE_IRQ_STATUS_BIT 0x1
+
+enum cam_icp_ipe_cmd_type {
+ CAM_ICP_IPE_CMD_FW_DOWNLOAD,
+ CAM_ICP_IPE_CMD_POWER_COLLAPSE,
+ CAM_ICP_IPE_CMD_POWER_RESUME,
+ CAM_ICP_IPE_CMD_SET_FW_BUF,
+ CAM_ICP_IPE_CMD_VOTE_CPAS,
+ CAM_ICP_IPE_CMD_CPAS_START,
+ CAM_ICP_IPE_CMD_CPAS_STOP,
+ CAM_ICP_IPE_CMD_UPDATE_CLK,
+ CAM_ICP_IPE_CMD_DISABLE_CLK,
+ CAM_ICP_IPE_CMD_RESET,
+ CAM_ICP_IPE_CMD_MAX,
+};
+
+#endif /* CAM_IPE_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
new file mode 100644
index 000000000000..7bb9b9ed18a2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_ICP_HW_MGR_INTF_H
+#define CAM_ICP_HW_MGR_INTF_H
+
+#include <uapi/media/cam_icp.h>
+#include <uapi/media/cam_defs.h>
+#include <linux/of.h>
+#include "cam_cpas_api.h"
+
+#define ICP_CLK_TURBO_HZ 600000000
+#define ICP_CLK_SVS_HZ 400000000
+
+#define CAM_ICP_A5_BW_BYTES_VOTE 40000000
+
+#define CAM_ICP_CTX_MAX 36
+
+#define CPAS_IPE1_BIT 0x2000
+
+int cam_icp_hw_mgr_init(struct device_node *of_node,
+ uint64_t *hw_mgr_hdl, int *iommu_hdl);
+
+/**
+ * struct cam_icp_cpas_vote
+ * @ahb_vote: AHB vote info
+ * @axi_vote: AXI vote info
+ * @ahb_vote_valid: Flag for ahb vote data
+ * @axi_vote_valid: flag for axi vote data
+ */
+struct cam_icp_cpas_vote {
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+ uint32_t ahb_vote_valid;
+ uint32_t axi_vote_valid;
+};
+
+#endif /* CAM_ICP_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/Makefile
new file mode 100644
index 000000000000..a6e8c7b84cd7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_core.c
new file mode 100644
index 000000000000..142fcdc6017d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -0,0 +1,397 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "ipe_core.h"
+#include "ipe_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_ipe_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "hfi_reg.h"
+
+#define HFI_MAX_POLL_TRY 5
+
+static int cam_ipe_caps_vote(struct cam_ipe_device_core_info *core_info,
+ struct cam_icp_cpas_vote *cpas_vote)
+{
+ int rc = 0;
+
+ if (cpas_vote->ahb_vote_valid)
+ rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+ &cpas_vote->ahb_vote);
+ if (cpas_vote->axi_vote_valid)
+ rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+ &cpas_vote->axi_vote);
+
+ if (rc)
+ CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
+
+ return rc;
+}
+
+int cam_ipe_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size)
+{
+ struct cam_hw_info *ipe_dev = device_priv;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_ipe_device_core_info *core_info = NULL;
+ struct cam_icp_cpas_vote cpas_vote;
+ int rc = 0;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+ return -EINVAL;
+ }
+
+ soc_info = &ipe_dev->soc_info;
+ core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+
+ if ((!soc_info) || (!core_info)) {
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
+ return -EINVAL;
+ }
+
+ cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+ cpas_vote.axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ cpas_vote.axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+ rc = cam_cpas_start(core_info->cpas_handle,
+ &cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
+ return rc;
+ }
+ core_info->cpas_start = true;
+
+ rc = cam_ipe_enable_soc_resources(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "soc enable is failed : %d", rc);
+ if (cam_cpas_stop(core_info->cpas_handle))
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
+ else
+ core_info->cpas_start = false;
+ } else {
+ core_info->clk_enable = true;
+ }
+
+ return rc;
+}
+
+int cam_ipe_deinit_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size)
+{
+ struct cam_hw_info *ipe_dev = device_priv;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_ipe_device_core_info *core_info = NULL;
+ int rc = 0;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+ return -EINVAL;
+ }
+
+ soc_info = &ipe_dev->soc_info;
+ core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+ if ((!soc_info) || (!core_info)) {
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
+ return -EINVAL;
+ }
+
+ rc = cam_ipe_disable_soc_resources(soc_info, core_info->clk_enable);
+ if (rc)
+ CAM_ERR(CAM_ICP, "soc disable is failed : %d", rc);
+ core_info->clk_enable = false;
+
+ if (core_info->cpas_start) {
+ if (cam_cpas_stop(core_info->cpas_handle))
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
+ else
+ core_info->cpas_start = false;
+ }
+
+ return rc;
+}
+
+static int cam_ipe_handle_pc(struct cam_hw_info *ipe_dev)
+{
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_ipe_device_core_info *core_info = NULL;
+ struct cam_ipe_device_hw_info *hw_info = NULL;
+ int pwr_ctrl;
+ int pwr_status;
+
+ soc_info = &ipe_dev->soc_info;
+ core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+ hw_info = core_info->ipe_hw_info;
+
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
+ true, &pwr_ctrl);
+ if (!(pwr_ctrl & IPE_COLLAPSE_MASK)) {
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+ true, &pwr_status);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0x1);
+
+ if (pwr_status >> IPE_PWR_ON_MASK)
+ return -EINVAL;
+
+ }
+ cam_ipe_get_gdsc_control(soc_info);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
+ true, &pwr_ctrl);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+ true, &pwr_status);
+ CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
+ pwr_ctrl, pwr_status);
+
+ return 0;
+}
+
+static int cam_ipe_handle_resume(struct cam_hw_info *ipe_dev)
+{
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_ipe_device_core_info *core_info = NULL;
+ struct cam_ipe_device_hw_info *hw_info = NULL;
+ int pwr_ctrl;
+ int pwr_status;
+ int rc = 0;
+
+ soc_info = &ipe_dev->soc_info;
+ core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+ hw_info = core_info->ipe_hw_info;
+
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
+ true, &pwr_ctrl);
+ if (pwr_ctrl & IPE_COLLAPSE_MASK) {
+ CAM_DBG(CAM_ICP, "IPE pwr_ctrl set(%x)", pwr_ctrl);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0);
+ }
+
+ rc = cam_ipe_transfer_gdsc_control(soc_info);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+ true, &pwr_status);
+ CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
+ pwr_ctrl, pwr_status);
+
+ return rc;
+}
+
+static int cam_ipe_cmd_reset(struct cam_hw_soc_info *soc_info,
+ struct cam_ipe_device_core_info *core_info)
+{
+ int pwr_ctrl, pwr_status, rc = 0;
+ uint32_t status = 0, retry_cnt = 0;
+ bool reset_ipe_cdm_fail = false;
+ bool reset_ipe_top_fail = false;
+
+ CAM_DBG(CAM_ICP, "CAM_ICP_IPE_CMD_RESET");
+	/* IPE CDM core reset */
+ cam_io_w_mb((uint32_t)0xF,
+ soc_info->reg_map[0].mem_base + IPE_CDM_RST_CMD);
+ while (retry_cnt < HFI_MAX_POLL_TRY) {
+ readw_poll_timeout((soc_info->reg_map[0].mem_base +
+ IPE_CDM_IRQ_STATUS),
+ status, ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1),
+ 100, 10000);
+
+ CAM_DBG(CAM_HFI, "ipe_cdm_irq_status = %u", status);
+
+ if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1)
+ break;
+ retry_cnt++;
+ }
+ status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ IPE_CDM_IRQ_STATUS);
+ if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
+ CAM_ERR(CAM_ICP, "IPE CDM rst failed status 0x%x", status);
+ reset_ipe_cdm_fail = true;
+ }
+
+	/* IPE top reset */
+ status = 0;
+ cam_io_w_mb((uint32_t)0x3,
+ soc_info->reg_map[0].mem_base + IPE_TOP_RST_CMD);
+ while (retry_cnt < HFI_MAX_POLL_TRY) {
+ readw_poll_timeout((soc_info->reg_map[0].mem_base +
+ IPE_TOP_IRQ_STATUS),
+ status, ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1),
+ 100, 10000);
+
+ CAM_DBG(CAM_HFI, "ipe_top_irq_status = %u", status);
+
+
+ if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1)
+ break;
+ retry_cnt++;
+ }
+ status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ IPE_TOP_IRQ_STATUS);
+ if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
+ CAM_ERR(CAM_ICP, "IPE top rst failed status 0x%x", status);
+ reset_ipe_top_fail = true;
+ }
+
+ cam_ipe_get_gdsc_control(soc_info);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, core_info->ipe_hw_info->pwr_ctrl,
+ true, &pwr_ctrl);
+ cam_cpas_reg_read(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP, core_info->ipe_hw_info->pwr_status,
+ true, &pwr_status);
+ CAM_DBG(CAM_ICP, "(After)pwr_ctrl = %x pwr_status = %x",
+ pwr_ctrl, pwr_status);
+
+ if (reset_ipe_cdm_fail || reset_ipe_top_fail)
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_hw_info *ipe_dev = device_priv;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_ipe_device_core_info *core_info = NULL;
+ struct cam_ipe_device_hw_info *hw_info = NULL;
+ int rc = 0;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_ICP, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ if (cmd_type >= CAM_ICP_IPE_CMD_MAX) {
+ CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
+ return -EINVAL;
+ }
+
+ soc_info = &ipe_dev->soc_info;
+ core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+ hw_info = core_info->ipe_hw_info;
+
+ switch (cmd_type) {
+ case CAM_ICP_IPE_CMD_VOTE_CPAS: {
+ struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+ if (!cmd_args)
+ return -EINVAL;
+
+ cam_ipe_caps_vote(core_info, cpas_vote);
+ break;
+ }
+
+ case CAM_ICP_IPE_CMD_CPAS_START: {
+ struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+ if (!cmd_args)
+ return -EINVAL;
+
+ if (!core_info->cpas_start) {
+ rc = cam_cpas_start(core_info->cpas_handle,
+ &cpas_vote->ahb_vote, &cpas_vote->axi_vote);
+ core_info->cpas_start = true;
+ }
+ break;
+ }
+
+ case CAM_ICP_IPE_CMD_CPAS_STOP:
+ if (core_info->cpas_start) {
+ cam_cpas_stop(core_info->cpas_handle);
+ core_info->cpas_start = false;
+ }
+ break;
+ case CAM_ICP_IPE_CMD_POWER_COLLAPSE:
+ rc = cam_ipe_handle_pc(ipe_dev);
+ break;
+ case CAM_ICP_IPE_CMD_POWER_RESUME:
+ rc = cam_ipe_handle_resume(ipe_dev);
+ break;
+ case CAM_ICP_IPE_CMD_UPDATE_CLK: {
+ struct cam_a5_clk_update_cmd *clk_upd_cmd =
+ (struct cam_a5_clk_update_cmd *)cmd_args;
+ uint32_t clk_rate = clk_upd_cmd->curr_clk_rate;
+
+ CAM_DBG(CAM_ICP, "ipe_src_clk rate = %d", (int)clk_rate);
+ if (!core_info->clk_enable) {
+ if (clk_upd_cmd->ipe_bps_pc_enable) {
+ cam_ipe_handle_pc(ipe_dev);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0x0);
+ }
+ rc = cam_ipe_toggle_clk(soc_info, true);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Enable failed");
+ else
+ core_info->clk_enable = true;
+ if (clk_upd_cmd->ipe_bps_pc_enable) {
+ rc = cam_ipe_handle_resume(ipe_dev);
+ if (rc)
+ CAM_ERR(CAM_ICP, "bps resume failed");
+ }
+ }
+ CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
+
+ rc = cam_ipe_update_clk_rate(soc_info, clk_rate);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to update clk");
+ }
+ break;
+ case CAM_ICP_IPE_CMD_DISABLE_CLK:
+ if (core_info->clk_enable == true)
+ cam_ipe_toggle_clk(soc_info, false);
+ core_info->clk_enable = false;
+ break;
+ case CAM_ICP_IPE_CMD_RESET:
+ rc = cam_ipe_cmd_reset(soc_info, core_info);
+ break;
+ default:
+ CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type);
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+irqreturn_t cam_ipe_irq(int irq_num, void *data)
+{
+ return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_core.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_core.h
new file mode 100644
index 000000000000..bf170d200129
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_core.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_IPE_CORE_H
+#define CAM_IPE_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+#define IPE_COLLAPSE_MASK 0x1
+#define IPE_PWR_ON_MASK 0x2
+
+struct cam_ipe_device_hw_info {
+ uint32_t hw_idx;
+ uint32_t pwr_ctrl;
+ uint32_t pwr_status;
+ uint32_t reserved;
+};
+
+struct cam_ipe_device_core_info {
+ struct cam_ipe_device_hw_info *ipe_hw_info;
+ uint32_t cpas_handle;
+ bool cpas_start;
+ bool clk_enable;
+};
+
+int cam_ipe_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_ipe_deinit_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_ipe_irq(int irq_num, void *data);
+
+#endif /* CAM_IPE_CORE_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_dev.c
new file mode 100644
index 000000000000..a01d114bae7b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_dev.c
@@ -0,0 +1,206 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "ipe_core.h"
+#include "ipe_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_ipe_device_hw_info cam_ipe_hw_info[] = {
+ {
+ .hw_idx = 0,
+ .pwr_ctrl = 0x4c,
+ .pwr_status = 0x48,
+ .reserved = 0,
+ },
+ {
+ .hw_idx = 1,
+ .pwr_ctrl = 0x54,
+ .pwr_status = 0x50,
+ .reserved = 0,
+ },
+};
+EXPORT_SYMBOL(cam_ipe_hw_info);
+
+static char ipe_dev_name[8];
+
+int cam_ipe_register_cpas(struct cam_hw_soc_info *soc_info,
+ struct cam_ipe_device_core_info *core_info,
+ uint32_t hw_idx)
+{
+ struct cam_cpas_register_params cpas_register_params;
+ int rc;
+
+ cpas_register_params.dev = &soc_info->pdev->dev;
+ memcpy(cpas_register_params.identifier, "ipe", sizeof("ipe"));
+ cpas_register_params.cam_cpas_client_cb = NULL;
+ cpas_register_params.cell_index = hw_idx;
+ cpas_register_params.userdata = NULL;
+
+ rc = cam_cpas_register_client(&cpas_register_params);
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "failed: %d", rc);
+ return rc;
+ }
+ core_info->cpas_handle = cpas_register_params.client_handle;
+
+ return rc;
+}
+
+int cam_ipe_probe(struct platform_device *pdev)
+{
+ struct cam_hw_info *ipe_dev = NULL;
+ struct cam_hw_intf *ipe_dev_intf = NULL;
+ const struct of_device_id *match_dev = NULL;
+ struct cam_ipe_device_core_info *core_info = NULL;
+ struct cam_ipe_device_hw_info *hw_info = NULL;
+ int rc = 0;
+ struct cam_cpas_query_cap query;
+ uint32_t cam_caps;
+ uint32_t hw_idx;
+
+ of_property_read_u32(pdev->dev.of_node,
+ "cell-index", &hw_idx);
+
+ cam_cpas_get_hw_info(&query.camera_family,
+ &query.camera_version, &query.cpas_version, &cam_caps);
+ if ((!(cam_caps & CPAS_IPE1_BIT)) && (hw_idx)) {
+ CAM_ERR(CAM_ICP, "IPE1 hw idx = %d\n", hw_idx);
+ return -EINVAL;
+ }
+
+ ipe_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+ if (!ipe_dev_intf)
+ return -ENOMEM;
+
+ ipe_dev_intf->hw_idx = hw_idx;
+ ipe_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!ipe_dev) {
+ kfree(ipe_dev_intf);
+ return -ENOMEM;
+ }
+
+ memset(ipe_dev_name, 0, sizeof(ipe_dev_name));
+ snprintf(ipe_dev_name, sizeof(ipe_dev_name),
+ "ipe%1u", ipe_dev_intf->hw_idx);
+
+ ipe_dev->soc_info.pdev = pdev;
+ ipe_dev->soc_info.dev = &pdev->dev;
+ ipe_dev->soc_info.dev_name = ipe_dev_name;
+ ipe_dev_intf->hw_priv = ipe_dev;
+ ipe_dev_intf->hw_ops.init = cam_ipe_init_hw;
+ ipe_dev_intf->hw_ops.deinit = cam_ipe_deinit_hw;
+ ipe_dev_intf->hw_ops.process_cmd = cam_ipe_process_cmd;
+ ipe_dev_intf->hw_type = CAM_ICP_DEV_IPE;
+
+ CAM_DBG(CAM_ICP, "type %d index %d",
+ ipe_dev_intf->hw_type,
+ ipe_dev_intf->hw_idx);
+
+ platform_set_drvdata(pdev, ipe_dev_intf);
+
+ ipe_dev->core_info = kzalloc(sizeof(struct cam_ipe_device_core_info),
+ GFP_KERNEL);
+ if (!ipe_dev->core_info) {
+ kfree(ipe_dev);
+ kfree(ipe_dev_intf);
+ return -ENOMEM;
+ }
+ core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev) {
+ CAM_DBG(CAM_ICP, "No ipe hardware info");
+ kfree(ipe_dev->core_info);
+ kfree(ipe_dev);
+ kfree(ipe_dev_intf);
+ rc = -EINVAL;
+ return rc;
+ }
+ hw_info = &cam_ipe_hw_info[ipe_dev_intf->hw_idx];
+ core_info->ipe_hw_info = hw_info;
+
+ rc = cam_ipe_init_soc_resources(&ipe_dev->soc_info, cam_ipe_irq,
+ ipe_dev);
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "failed to init_soc");
+ kfree(ipe_dev->core_info);
+ kfree(ipe_dev);
+ kfree(ipe_dev_intf);
+ return rc;
+ }
+
+ CAM_DBG(CAM_ICP, "cam_ipe_init_soc_resources : %pK",
+ (void *)&ipe_dev->soc_info);
+ rc = cam_ipe_register_cpas(&ipe_dev->soc_info,
+ core_info, ipe_dev_intf->hw_idx);
+ if (rc < 0) {
+ kfree(ipe_dev->core_info);
+ kfree(ipe_dev);
+ kfree(ipe_dev_intf);
+ return rc;
+ }
+ ipe_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+ mutex_init(&ipe_dev->hw_mutex);
+ spin_lock_init(&ipe_dev->hw_lock);
+ init_completion(&ipe_dev->hw_complete);
+
+ CAM_DBG(CAM_ICP, "IPE%d probe successful",
+ ipe_dev_intf->hw_idx);
+
+ return rc;
+}
+
+static const struct of_device_id cam_ipe_dt_match[] = {
+ {
+ .compatible = "qcom,cam-ipe",
+ .data = &cam_ipe_hw_info,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cam_ipe_dt_match);
+
+static struct platform_driver cam_ipe_driver = {
+ .probe = cam_ipe_probe,
+ .driver = {
+ .name = "cam-ipe",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_ipe_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_ipe_init_module(void)
+{
+ return platform_driver_register(&cam_ipe_driver);
+}
+
+static void __exit cam_ipe_exit_module(void)
+{
+ platform_driver_unregister(&cam_ipe_driver);
+}
+
+module_init(cam_ipe_init_module);
+module_exit(cam_ipe_exit_module);
+MODULE_DESCRIPTION("CAM IPE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_soc.c
new file mode 100644
index 000000000000..91d440fcdcee
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -0,0 +1,173 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "ipe_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+
+int cam_ipe_transfer_gdsc_control(struct cam_hw_soc_info *soc_info)
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < soc_info->num_rgltr; i++) {
+ rc = regulator_set_mode(soc_info->rgltr[i],
+ REGULATOR_MODE_FAST);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
+ soc_info->rgltr_name[i]);
+ goto rgltr_set_mode_failed;
+ }
+ }
+ return 0;
+
+rgltr_set_mode_failed:
+ for (i = i - 1; i >= 0; i--)
+ if (soc_info->rgltr[i])
+ regulator_set_mode(soc_info->rgltr[i],
+ REGULATOR_MODE_NORMAL);
+
+ return rc;
+}
+
+int cam_ipe_get_gdsc_control(struct cam_hw_soc_info *soc_info)
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < soc_info->num_rgltr; i++) {
+ rc = regulator_set_mode(soc_info->rgltr[i],
+ REGULATOR_MODE_NORMAL);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
+ soc_info->rgltr_name[i]);
+ goto rgltr_set_mode_failed;
+ }
+ }
+ return 0;
+
+rgltr_set_mode_failed:
+ for (i = i - 1; i >= 0; i--)
+ if (soc_info->rgltr[i])
+ regulator_set_mode(soc_info->rgltr[i],
+ REGULATOR_MODE_FAST);
+
+ return rc;
+}
+
+static int cam_ipe_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc < 0)
+ CAM_ERR(CAM_ICP, "get ipe dt prop is failed");
+
+ return rc;
+}
+
+static int cam_ipe_request_platform_resource(
+ struct cam_hw_soc_info *soc_info,
+ irq_handler_t ipe_irq_handler, void *irq_data)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_request_platform_resource(soc_info, ipe_irq_handler,
+ irq_data);
+
+ return rc;
+}
+
+int cam_ipe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t ipe_irq_handler, void *irq_data)
+{
+ int rc = 0;
+
+ rc = cam_ipe_get_dt_properties(soc_info);
+ if (rc < 0)
+ return rc;
+
+ rc = cam_ipe_request_platform_resource(soc_info, ipe_irq_handler,
+ irq_data);
+ if (rc < 0)
+ return rc;
+
+ return rc;
+}
+
+int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_SVS_VOTE, false);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "enable platform failed");
+ return rc;
+ }
+
+ return rc;
+}
+
+int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+ bool disable_clk)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
+ false);
+ if (rc)
+ CAM_ERR(CAM_ICP, "enable platform failed");
+
+ return rc;
+}
+
+int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info,
+ uint32_t clk_rate)
+{
+ int32_t src_clk_idx;
+
+ if (!soc_info)
+ return -EINVAL;
+
+ src_clk_idx = soc_info->src_clk_idx;
+
+ if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
+ (soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+ (clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+ CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d",
+ clk_rate,
+ soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+ clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+ }
+
+ return cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
+}
+
+int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
+{
+ int rc = 0;
+
+ if (clk_enable)
+ rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+ else
+ cam_soc_util_clk_disable_default(soc_info);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_soc.h b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_soc.h
new file mode 100644
index 000000000000..d05a08a11002
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/ipe_hw/ipe_soc.h
@@ -0,0 +1,33 @@
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef CAM_IPE_SOC_H
#define CAM_IPE_SOC_H

#include "cam_soc_util.h"

/* Parse DT properties and request platform resources for the IPE core,
 * registering @ipe_irq_handler (called with @irq_data) for its IRQ.
 */
int cam_ipe_init_soc_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t ipe_irq_handler, void *irq_data);

/* Power up the IPE platform resources (clocks voted at SVS level). */
int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info);

/* Power down the IPE platform resources; gate clocks iff @disable_clk. */
int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info,
	bool disable_clk);

/* NOTE(review): implementations of the two GDSC helpers below are not in
 * ipe_soc.c's visible portion — presumably they hand GDSC ownership
 * between HLOS and firmware; confirm against the implementation.
 */
int cam_ipe_get_gdsc_control(struct cam_hw_soc_info *soc_info);

int cam_ipe_transfer_gdsc_control(struct cam_hw_soc_info *soc_info);

/* Set the IPE source clock to @clk_rate, clamped to the TURBO level. */
int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info,
	uint32_t clk_rate);
/* Enable (SVS vote) or disable the default IPE clocks. */
int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
#endif /* CAM_IPE_SOC_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/Makefile
new file mode 100644
index 000000000000..aa8d1c6595a3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/Makefile
@@ -0,0 +1,10 @@
# Makefile for the camera_v3 ISP driver.  The include paths pull in the
# headers of the sibling camera subsystems this module builds against.
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm/

# Hardware-manager sub-directory plus the ISP device node and context
# state machine, all gated on CONFIG_SPECTRA_CAMERA.
obj-$(CONFIG_SPECTRA_CAMERA) += isp_hw_mgr/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_dev.o cam_isp_context.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_context.c
new file mode 100644
index 000000000000..113e1a8d5298
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_context.c
@@ -0,0 +1,3360 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
+
+#include "cam_mem_mgr.h"
+#include "cam_sync_api.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+#include "cam_packet_util.h"
+#include "cam_context_utils.h"
+#include "cam_cdm_util.h"
+#include "cam_isp_context.h"
+#include "cam_common_util.h"
+
/* Device name this context reports to the generic context layer. */
static const char isp_dev_name[] = "isp";

/*
 * Human-readable names for the activated substates, indexed by the
 * CAM_ISP_CTX_ACTIVATED_* values; used by the state-transition logging
 * throughout this file.
 * NOTE(review): this is a mutable global with external linkage; unless
 * another translation unit references it, it should be
 * "static const char *const substate_name[]" — confirm before changing.
 */
char *substate_name[8] = {
	"CAM_ISP_CTX_ACTIVATED_SOF",
	"CAM_ISP_CTX_ACTIVATED_APPLIED",
	"CAM_ISP_CTX_ACTIVATED_EPOCH",
	"CAM_ISP_CTX_ACTIVATED_BUBBLE",
	"CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED",
	"CAM_ISP_CTX_ACTIVATED_HW_ERROR",
	"CAM_ISP_CTX_ACTIVATED_HALT",
	"CAM_ISP_CTX_ACTIVATED_MAX",
};
/* Atomically advance the circular state-monitor head and yield the new
 * slot index in [0, CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES).
 */
#define INC_STATE_MONITOR_HEAD(head) \
	(atomic64_add_return(1, head) % \
	CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES)
+
+static int cam_isp_context_dump_active_request(void *data, unsigned long iova,
+ uint32_t buf_info);
+
+static void __cam_isp_ctx_update_state_monitor_array(
+ struct cam_isp_context *ctx_isp,
+ enum cam_isp_state_change_trigger trigger_type,
+ uint32_t req_id)
+{
+ int iterator = 0;
+
+ iterator = INC_STATE_MONITOR_HEAD(&ctx_isp->state_monitor_head);
+ ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
+ ctx_isp->substate_activated;
+ ctx_isp->cam_isp_ctx_state_monitor[iterator].trigger =
+ trigger_type;
+ ctx_isp->cam_isp_ctx_state_monitor[iterator].req_id =
+ req_id;
+ ctx_isp->cam_isp_ctx_state_monitor[iterator].evt_time_stamp =
+ jiffies_to_msecs(jiffies);
+}
+
+static const char *__cam_isp_ctx_substate_val_to_type(
+ uint32_t type)
+{
+ switch (type) {
+ case CAM_ISP_CTX_ACTIVATED_SOF:
+ return "SOF";
+ case CAM_ISP_CTX_ACTIVATED_APPLIED:
+ return "APPLIED";
+ case CAM_ISP_CTX_ACTIVATED_EPOCH:
+ return "EPOCH";
+ case CAM_ISP_CTX_ACTIVATED_BUBBLE:
+ return "BUBBLE";
+ case CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED:
+ return "BUBBLE_APPLIED";
+ case CAM_ISP_CTX_ACTIVATED_HALT:
+ return "HALT";
+ default:
+ return "CAM_ISP_CTX_INVALID_STATE";
+ }
+}
+
+static const char *__cam_isp_hw_evt_val_to_type(
+ uint32_t evt_id)
+{
+ switch (evt_id) {
+ case CAM_ISP_STATE_CHANGE_TRIGGER_ERROR:
+ return "ERROR";
+ case CAM_ISP_STATE_CHANGE_TRIGGER_SOF:
+ return "SOF";
+ case CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE:
+ return "REG_UPDATE";
+ case CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH:
+ return "EPOCH";
+ case CAM_ISP_STATE_CHANGE_TRIGGER_EOF:
+ return "EOF";
+ case CAM_ISP_STATE_CHANGE_TRIGGER_DONE:
+ return "DONE";
+ default:
+ return "CAM_ISP_EVENT_INVALID";
+ }
+}
+
/*
 * Dump the whole circular state-monitor buffer, oldest entry first,
 * via rate-limited error logs.  Intended for post-mortem debugging
 * when a request misbehaves.
 */
static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp)
{
	int i = 0;
	uint64_t state_head = 0;
	uint64_t index;

	state_head = atomic64_read(&ctx_isp->state_monitor_head);
	CAM_ERR_RATE_LIMIT(CAM_ISP,
		"Dumping state information for preceding requests");

	/*
	 * Walk backwards MAX_ENTRIES slots from the current head so the
	 * print order is oldest -> newest.  The "+ MAX" before the modulo
	 * keeps the unsigned arithmetic from wrapping when i > state_head.
	 */
	for (i = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES - 1; i >= 0;
		i--) {
		index = (((state_head - i) +
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) %
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES);
		CAM_ERR_RATE_LIMIT(CAM_ISP,
		"time[0x%llx] req_id[%u] state[%s] evt_type[%s]",
		ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp,
		ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
		__cam_isp_hw_evt_val_to_type(
		ctx_isp->cam_isp_ctx_state_monitor[index].trigger));
	}
}
+
/*
 * Dump every command buffer attached to @req_isp through the CDM
 * decoder, for debugging a failing request.  Buffers whose kernel
 * mapping cannot be resolved are logged and skipped.
 */
static void cam_isp_ctx_dump_req(struct cam_isp_ctx_req *req_isp)
{
	int i = 0, rc = 0;
	size_t len = 0;         /* mapped buffer length, in bytes */
	uint32_t *buf_addr;
	uint32_t *buf_start, *buf_end;

	for (i = 0; i < req_isp->num_cfg; i++) {
		rc = cam_packet_util_get_cmd_mem_addr(
			req_isp->cfg[i].handle, &buf_addr, &len);
		if (rc) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"Failed to get_cmd_mem_addr, rc=%d",
				rc);
		} else {
			/* offset/len are applied in byte units, hence the
			 * uint8_t* casts before re-casting to uint32_t*.
			 */
			buf_start = (uint32_t *)((uint8_t *) buf_addr +
				req_isp->cfg[i].offset);
			buf_end = (uint32_t *)((uint8_t *) buf_start +
				req_isp->cfg[i].len - 1);
			/*
			 * NOTE(review): len is in bytes but
			 * (buf_end - buf_start + 1) is a count of uint32
			 * words, so this bounds check compares mixed
			 * units and will rarely trigger — confirm the
			 * intended comparison before relying on it.
			 */
			if (len < (buf_end - buf_start + 1)) {
				CAM_DBG(CAM_ISP,
					"Invalid len %zu, buf_start-end=%d",
					len, (buf_end - buf_start + 1));
				continue;
			}
			cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
		}
	}
}
+
/*
 * Insert @req into the context's pending list keeping the list sorted
 * by ascending request_id.  Requests with ids greater than the new one
 * are temporarily moved to @temp_list, the new request is appended,
 * and the displaced requests are re-appended after it.  A duplicate id
 * is only warned about; the request is still queued.
 *
 * Always returns 0.  Takes ctx->lock (bh) for the whole operation.
 */
static int __cam_isp_ctx_enqueue_request_in_order(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	struct cam_ctx_request *req_current;
	struct cam_ctx_request *req_prev;
	struct list_head temp_list;

	INIT_LIST_HEAD(&temp_list);
	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
	} else {
		/* Walk from the tail: peel off every entry whose id is
		 * larger than the incoming request's id.
		 */
		list_for_each_entry_safe_reverse(
			req_current, req_prev, &ctx->pending_req_list, list) {
			if (req->request_id < req_current->request_id) {
				list_del_init(&req_current->list);
				list_add(&req_current->list, &temp_list);
				continue;
			} else if (req->request_id == req_current->request_id) {
				CAM_WARN(CAM_ISP,
					"Received duplicated request %lld",
					req->request_id);
			}
			break;
		}
		list_add_tail(&req->list, &ctx->pending_req_list);

		/* Restore the peeled-off (larger-id) requests behind it. */
		if (!list_empty(&temp_list)) {
			list_for_each_entry_safe(
				req_current, req_prev, &temp_list, list) {
				list_del_init(&req_current->list);
				list_add_tail(&req_current->list,
					&ctx->pending_req_list);
			}
		}
	}
	spin_unlock_bh(&ctx->lock);
	return 0;
}
+
/*
 * Queue an INIT packet request.  If the pending list is empty the
 * request is queued as-is.  If the list head is itself an INIT packet,
 * the new request's config entries and fence maps are merged into it
 * (the merged request takes the new request_id and the new container
 * goes back to the free list).  An update packet arriving before any
 * INIT packet is rejected with -EINVAL.
 *
 * Returns 0 on success, -ENOMEM if the merged config would overflow
 * CAM_ISP_CTX_CFG_MAX, -EINVAL on sequence errors.
 * Takes ctx->lock (bh) for the whole operation.
 */
static int __cam_isp_ctx_enqueue_init_request(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	int rc = 0;
	struct cam_ctx_request *req_old;
	struct cam_isp_ctx_req *req_isp_old;
	struct cam_isp_ctx_req *req_isp_new;

	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
		CAM_DBG(CAM_ISP, "INIT packet added req id= %d",
			req->request_id);
		goto end;
	}

	req_old = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv;
	req_isp_new = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp_old->hw_update_data.packet_opcode_type ==
		CAM_ISP_PACKET_INIT_DEV) {
		/* Capacity check before merging config entries. */
		if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >=
			CAM_ISP_CTX_CFG_MAX) {
			CAM_WARN(CAM_ISP, "Can not merge INIT pkt");
			rc = -ENOMEM;
		}

		/* An INIT packet must not already carry fences. */
		if (req_isp_old->num_fence_map_out != 0 ||
			req_isp_old->num_fence_map_in != 0) {
			CAM_WARN(CAM_ISP, "Invalid INIT pkt sequence");
			rc = -EINVAL;
		}

		if (!rc) {
			/* Adopt the new request's fence maps wholesale and
			 * append its config entries after the old ones.
			 */
			memcpy(req_isp_old->fence_map_out,
				req_isp_new->fence_map_out,
				sizeof(req_isp_new->fence_map_out[0])*
				req_isp_new->num_fence_map_out);
			req_isp_old->num_fence_map_out =
				req_isp_new->num_fence_map_out;

			memcpy(req_isp_old->fence_map_in,
				req_isp_new->fence_map_in,
				sizeof(req_isp_new->fence_map_in[0])*
				req_isp_new->num_fence_map_in);
			req_isp_old->num_fence_map_in =
				req_isp_new->num_fence_map_in;

			memcpy(&req_isp_old->cfg[req_isp_old->num_cfg],
				req_isp_new->cfg,
				sizeof(req_isp_new->cfg[0])*
				req_isp_new->num_cfg);
			req_isp_old->num_cfg += req_isp_new->num_cfg;

			/* Merged request represents the newest id. */
			req_old->request_id = req->request_id;

			list_add_tail(&req->list, &ctx->free_req_list);
		}
	} else {
		CAM_WARN(CAM_ISP,
			"Received Update pkt before INIT pkt. req_id= %lld",
			req->request_id);
		rc = -EINVAL;
	}
end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}
+
+static const char *__cam_isp_resource_handle_id_to_type(
+ uint32_t resource_handle)
+{
+ switch (resource_handle) {
+ case CAM_ISP_IFE_OUT_RES_FULL:
+ return "FULL";
+ case CAM_ISP_IFE_OUT_RES_DS4:
+ return "DS4";
+ case CAM_ISP_IFE_OUT_RES_DS16:
+ return "DS16";
+ case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
+ return "RAW_DUMP";
+ case CAM_ISP_IFE_OUT_RES_FD:
+ return "FD";
+ case CAM_ISP_IFE_OUT_RES_PDAF:
+ return "PDAF";
+ case CAM_ISP_IFE_OUT_RES_2PD:
+ return "2PD";
+ case CAM_ISP_IFE_OUT_RES_RDI_0:
+ return "RDI_0";
+ case CAM_ISP_IFE_OUT_RES_RDI_1:
+ return "RDI_1";
+ case CAM_ISP_IFE_OUT_RES_RDI_2:
+ return "RDI_2";
+ case CAM_ISP_IFE_OUT_RES_RDI_3:
+ return "RDI_3";
+ case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE:
+ return "STATS_HDR_BE";
+ case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST:
+ return "STATS_HDR_BHIST";
+ case CAM_ISP_IFE_OUT_RES_STATS_TL_BG:
+ return "STATS_TL_BG";
+ case CAM_ISP_IFE_OUT_RES_STATS_BF:
+ return "STATS_BF";
+ case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG:
+ return "STATS_AWB_BG";
+ case CAM_ISP_IFE_OUT_RES_STATS_BHIST:
+ return "STATS_BHIST";
+ case CAM_ISP_IFE_OUT_RES_STATS_RS:
+ return "STATS_RS";
+ case CAM_ISP_IFE_OUT_RES_STATS_CS:
+ return "STATS_CS";
+ default:
+ return "CAM_ISP_Invalid_Resource_Type";
+ }
+}
+
+static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
+{
+ uint64_t ts = 0;
+
+ if (!evt_data)
+ return 0;
+
+ switch (evt_id) {
+ case CAM_ISP_HW_EVENT_ERROR:
+ ts = ((struct cam_isp_hw_error_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_SOF:
+ ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_REG_UPDATE:
+ ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_EPOCH:
+ ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_EOF:
+ ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_DONE:
+ break;
+ default:
+ CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
+ }
+
+ return ts;
+}
+
+static void __cam_isp_ctx_handle_buf_done_fail_log(
+ struct cam_isp_ctx_req *req_isp)
+{
+ int i;
+
+ if (req_isp->num_fence_map_out >= CAM_ISP_CTX_RES_MAX) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "Num Resources exceed mMAX %d >= %d ",
+ req_isp->num_fence_map_out, CAM_ISP_CTX_RES_MAX);
+ return;
+ }
+
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "Resource Handles that fail to generate buf_done in prev frame");
+ for (i = 0; i < req_isp->num_fence_map_out; i++) {
+ if (req_isp->fence_map_out[i].sync_id != -1)
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "Resource_Handle: [%s] Sync_ID: [0x%x]",
+ __cam_isp_resource_handle_id_to_type(
+ req_isp->fence_map_out[i].resource_handle),
+ req_isp->fence_map_out[i].sync_id);
+ }
+}
+
/*
 * Core buf-done handler shared by the activated substates.  Matches
 * each done resource handle against the first active request's output
 * fence map, signals the corresponding sync objects (success, or error
 * when a bubble was detected without recovery), and once every fence of
 * the request is acked moves the request either back to the pending
 * list (bubble recovery) or to the free list.
 *
 * @bubble_state: non-zero when called from a bubble substate.
 * Returns 0, or -EINVAL when a done handle has no matching fence entry.
 */
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	int i, j;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP, "Buf done with no active request!");
		goto end;
	}

	CAM_DBG(CAM_ISP, "Enter with bubble_state %d", bubble_state);

	/* Buf-dones are assumed to arrive for the oldest active request. */
	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	trace_cam_buf_done("ISP", ctx, req);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	for (i = 0; i < done->num_handles; i++) {
		/* Find the fence entry for this done resource handle. */
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			CAM_ERR(CAM_ISP,
				"Can not find matching lane handle 0x%x!",
				done->resource_handle[i]);
			rc = -EINVAL;
			continue;
		}

		/* Already signalled earlier: dump the stragglers instead. */
		if (req_isp->fence_map_out[j].sync_id == -1) {
			__cam_isp_ctx_handle_buf_done_fail_log(req_isp);
			continue;
		}

		/* NOTE(review): the per-fence CAM_INFO logs below fire on
		 * every buffer of every frame — they look like debug
		 * leftovers that belong at CAM_DBG; confirm before release.
		 */
		if (!req_isp->bubble_detected) {
			CAM_INFO(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS);
			if (rc)
				CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
					rc);
		} else if (!req_isp->bubble_report) {
			/* Bubble without CRM recovery: fail the fence. */
			CAM_INFO(CAM_ISP,
				"Sync with failure: req %lld res 0x%x fd 0x%x",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR);
			if (rc)
				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
					rc);
		} else {
			/*
			 * Ignore the buffer done if bubble detect is on
			 * Increment the ack number here, and queue the
			 * request back to pending list whenever all the
			 * buffers are done.
			 */
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d",
				bubble_state, req_isp->bubble_report);
			continue;
		}

		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x",
			req->request_id,
			req_isp->fence_map_out[j].sync_id);
		if (!rc) {
			req_isp->num_acked++;
			req_isp->fence_map_out[j].sync_id = -1;
		}
	}

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out);
		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
	}

	/* Request stays active until every fence has been acked. */
	if (req_isp->num_acked != req_isp->num_fence_map_out)
		return rc;

	ctx_isp->active_req_cnt--;

	if (req_isp->bubble_detected && req_isp->bubble_report) {
		/* Bubble recovery: requeue for a re-apply by CRM. */
		req_isp->num_acked = 0;
		req_isp->bubble_detected = false;
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);

		CAM_DBG(CAM_REQ,
			"Move active request %lld to pending list(cnt = %d) [bubble recovery]",
			req->request_id, ctx_isp->active_req_cnt);
	} else {
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);

		CAM_DBG(CAM_REQ,
			"Move active request %lld to free list(cnt = %d) [all fences done]",
			req->request_id, ctx_isp->active_req_cnt);
	}

end:
	/* NOTE(review): base->req_list here is dereferenced directly for a
	 * request id — it is unclear from this file that it points at a
	 * valid request at this point (the "no active request" path jumps
	 * straight here); confirm against cam_context's definition.
	 */
	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_DONE,
		ctx_isp->base->req_list->request_id);
	return rc;
}
+
+static void __cam_isp_ctx_send_sof_boot_timestamp(
+ struct cam_isp_context *ctx_isp, uint64_t request_id,
+ uint32_t sof_event_status)
+{
+ struct cam_req_mgr_message req_msg;
+
+ req_msg.session_hdl = ctx_isp->base->session_hdl;
+ req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
+ req_msg.u.frame_msg.request_id = request_id;
+ req_msg.u.frame_msg.timestamp = ctx_isp->boot_timestamp;
+ req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
+ req_msg.u.frame_msg.sof_status = sof_event_status;
+
+ CAM_DBG(CAM_ISP,
+ "request id:%lld frame number:%lld boot time stamp:0x%llx",
+ request_id, ctx_isp->frame_id,
+ ctx_isp->boot_timestamp);
+
+ if (cam_req_mgr_notify_message(&req_msg,
+ V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS,
+ V4L_EVENT_CAM_REQ_MGR_EVENT))
+ CAM_ERR(CAM_ISP,
+ "Error in notifying the boot time for req id:%lld",
+ request_id);
+}
+
+
+static void __cam_isp_ctx_send_sof_timestamp(
+ struct cam_isp_context *ctx_isp, uint64_t request_id,
+ uint32_t sof_event_status)
+{
+ struct cam_req_mgr_message req_msg;
+
+ req_msg.session_hdl = ctx_isp->base->session_hdl;
+ req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
+ req_msg.u.frame_msg.request_id = request_id;
+ req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
+ req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
+ req_msg.u.frame_msg.sof_status = sof_event_status;
+
+ CAM_DBG(CAM_ISP,
+ "request id:%lld frame number:%lld SOF time stamp:0x%llx",
+ request_id, ctx_isp->frame_id,
+ ctx_isp->sof_timestamp_val);
+ CAM_DBG(CAM_ISP, "sof status:%d", sof_event_status);
+
+ if (cam_req_mgr_notify_message(&req_msg,
+ V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
+ CAM_ERR(CAM_ISP,
+ "Error in notifying the sof time for req id:%lld",
+ request_id);
+
+ __cam_isp_ctx_send_sof_boot_timestamp(ctx_isp,
+ request_id, sof_event_status);
+
+}
+
+static int __cam_isp_ctx_reg_upd_in_epoch_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ CAM_INFO(CAM_ISP, "state = %s IRQ is RUP", substate_name[ctx_isp->substate_activated]);
+ if (ctx_isp->frame_id == 1)
+ CAM_DBG(CAM_ISP, "Reg update for early PCR");
+ else
+ CAM_WARN(CAM_ISP,
+ "Unexpected reg update in activated substate:%d for frame_id:%lld",
+ ctx_isp->substate_activated, ctx_isp->frame_id);
+ return 0;
+}
+
/*
 * Reg-update ack in APPLIED / BUBBLE_APPLIED: the waiting request has
 * been latched by hardware, so promote it to the active list (or
 * straight to the free list when it has no output fences) and move the
 * substate to EPOCH.
 *
 * Always returns 0.
 */
static int __cam_isp_ctx_reg_upd_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;

	CAM_INFO(CAM_ISP, "state = %s IRQ is RUP", substate_name[ctx_isp->substate_activated]);
	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}
	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
			req->request_id, ctx_isp->active_req_cnt);
	} else {
		/* no io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move active request %lld to free list(cnt = %d)",
			req->request_id, ctx_isp->active_req_cnt);
	}

	/*
	 * This function only called directly from applied and bubble applied
	 * state so change substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);

end:
	return rc;
}
+
+static int __cam_isp_ctx_notify_sof_in_activated_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ int rc = 0;
+ struct cam_req_mgr_trigger_notify notify;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_ctx_request *req;
+ uint64_t request_id = 0;
+
+ CAM_INFO(CAM_ISP, "state = %s IRQ is EPOCH", substate_name[ctx_isp->substate_activated]);
+ /*
+ * notify reqmgr with sof signal. Note, due to scheduling delay
+ * we can run into situation that two active requests has already
+ * be in the active queue while we try to do the notification.
+ * In this case, we need to skip the current notification. This
+ * helps the state machine to catch up the delay.
+ */
+ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
+ ctx_isp->active_req_cnt <= 2) {
+ if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF) {
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.frame_id = ctx_isp->frame_id;
+ notify.trigger = CAM_TRIGGER_POINT_SOF;
+
+ ctx->ctx_crm_intf->notify_trigger(&notify);
+ CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
+ ctx_isp->frame_id);
+ }
+
+ list_for_each_entry(req, &ctx->active_req_list, list) {
+ if (req->request_id > ctx_isp->reported_req_id) {
+ request_id = req->request_id;
+ ctx_isp->reported_req_id = request_id;
+ break;
+ }
+ }
+
+ if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE)
+ request_id = 0;
+
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+ } else {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
+ rc = -EFAULT;
+ }
+
+ return 0;
+}
+
+static int __cam_isp_ctx_notify_eof_in_activated_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ int rc = 0;
+ struct cam_req_mgr_trigger_notify notify;
+ struct cam_context *ctx = ctx_isp->base;
+
+ if (!(ctx_isp->subscribe_event & CAM_TRIGGER_POINT_EOF))
+ return rc;
+
+ /* notify reqmgr with eof signal */
+ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) {
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.frame_id = ctx_isp->frame_id;
+ notify.trigger = CAM_TRIGGER_POINT_EOF;
+
+ ctx->ctx_crm_intf->notify_trigger(&notify);
+ CAM_DBG(CAM_ISP, "Notify CRM EOF frame %lld\n",
+ ctx_isp->frame_id);
+ } else {
+ CAM_ERR(CAM_ISP, "Can not notify EOF to CRM");
+ rc = -EFAULT;
+ }
+
+ CAM_INFO(CAM_ISP, "state = %s IRQ is EOF", substate_name[ctx_isp->substate_activated]);
+ return rc;
+}
+
+static int __cam_isp_ctx_reg_upd_in_hw_error(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+ CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+ return 0;
+}
+
+static int __cam_isp_ctx_sof_in_activated_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ int rc = 0;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+ struct cam_ctx_request *req;
+ struct cam_context *ctx = ctx_isp->base;
+
+ req = list_last_entry(&ctx->pending_req_list,
+ struct cam_ctx_request, list);
+
+ if (!evt_data) {
+ CAM_ERR(CAM_ISP, "in valid sof event data");
+ return -EINVAL;
+ }
+
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ ctx_isp->boot_timestamp = sof_event_data->boot_time;
+ __cam_isp_ctx_update_state_monitor_array(ctx_isp,
+ CAM_ISP_STATE_CHANGE_TRIGGER_SOF, req->request_id);
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+ CAM_INFO(CAM_ISP, "state = %s IRQ is SOF frame_id = %lld",
+ substate_name[ctx_isp->substate_activated], ctx_isp->frame_id);
+
+
+ return rc;
+}
+
/*
 * Reg-update while in SOF substate.  Normally only legitimate for the
 * very first frame (the initial configuration latches on frame 1);
 * otherwise the waiting request is retired to the free list only when
 * all of its fences were already acked.
 *
 * Always returns 0.
 */
static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	/* Only expected during start-up (first frame) or teardown. */
	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"receive rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
	CAM_INFO(CAM_ISP, "state = %s IRQ is RUP", substate_name[ctx_isp->substate_activated]);
end:
	return rc;
}
+
+static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
+ void *evt_data)
+{
+ struct cam_ctx_request *req;
+ struct cam_isp_ctx_req *req_isp;
+ struct cam_context *ctx = ctx_isp->base;
+ uint64_t request_id = 0;
+
+ CAM_INFO(CAM_ISP, "state = %s IRQ is EPOCH", substate_name[ctx_isp->substate_activated]);
+ if (list_empty(&ctx->wait_req_list)) {
+ /*
+ * If no wait req in epoch, this is an error case.
+ * The recovery is to go back to sof state
+ */
+ CAM_ERR(CAM_ISP, "No wait request");
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+ CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+
+ /* Send SOF event as empty frame*/
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+ goto end;
+ }
+
+ req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
+ list);
+ req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+ req_isp->bubble_detected = true;
+
+ CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
+ if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+ ctx->ctx_crm_intf->notify_err) {
+ struct cam_req_mgr_error_notify notify;
+
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.req_id = req->request_id;
+ notify.error = CRM_KMD_ERR_BUBBLE;
+ ctx->ctx_crm_intf->notify_err(&notify);
+ CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
+ ctx_isp->frame_id);
+ } else {
+ req_isp->bubble_report = 0;
+ }
+
+ /*
+ * Always move the request to active list. Let buf done
+ * function handles the rest.
+ */
+ CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
+ ctx_isp->active_req_cnt++;
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->active_req_list);
+
+ if (req->request_id > ctx_isp->reported_req_id) {
+ request_id = req->request_id;
+ ctx_isp->reported_req_id = request_id;
+ }
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_ERROR);
+
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+ CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+ CAM_DBG(CAM_ISP, "next substate %d",
+ ctx_isp->substate_activated);
+end:
+ if (request_id == 0) {
+ req = list_last_entry(&ctx->active_req_list,
+ struct cam_ctx_request, list);
+ __cam_isp_ctx_update_state_monitor_array(ctx_isp,
+ CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
+ } else {
+ __cam_isp_ctx_update_state_monitor_array(ctx_isp,
+ CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, request_id);
+ }
+ return 0;
+}
+
+
+static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
+ void *evt_data)
+{
+ int rc = 0;
+ struct cam_isp_hw_done_event_data *done =
+ (struct cam_isp_hw_done_event_data *) evt_data;
+
+ CAM_INFO(CAM_ISP, "state = %s IRQ is BUF_DONE", substate_name[ctx_isp->substate_activated]);
+ rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
+ return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
+ void *evt_data)
+{
+ int rc = 0;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+ struct cam_ctx_request *req;
+
+ CAM_INFO(CAM_ISP, "SURESH: state = %s IRQ is SOF frame_id = %lld",
+ substate_name[ctx_isp->substate_activated], ctx_isp->frame_id);
+
+ if (!evt_data) {
+ CAM_ERR(CAM_ISP, "in valid sof event data");
+ return -EINVAL;
+ }
+
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ ctx_isp->boot_timestamp = sof_event_data->boot_time;
+
+ if (list_empty(&ctx->active_req_list))
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+ else
+ CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
+ CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+
+ req = list_last_entry(&ctx->active_req_list,
+ struct cam_ctx_request, list);
+ if (req)
+ __cam_isp_ctx_update_state_monitor_array(ctx_isp,
+ CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
+ ctx->req_list->request_id);
+ CAM_DBG(CAM_ISP, "next substate %d",
+ ctx_isp->substate_activated);
+
+ return rc;
+}
+
+static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
+ void *evt_data)
+{
+ int rc = 0;
+ struct cam_isp_hw_done_event_data *done =
+ (struct cam_isp_hw_done_event_data *) evt_data;
+
+ CAM_INFO(CAM_ISP, "state = %s IRQ is BUF_DONE", substate_name[ctx_isp->substate_activated]);
+ rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
+ return rc;
+}
+
+static int __cam_isp_ctx_buf_done_in_bubble(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ int rc = 0;
+ struct cam_isp_hw_done_event_data *done =
+ (struct cam_isp_hw_done_event_data *) evt_data;
+
+ CAM_INFO(CAM_ISP, "state = %s IRQ is BUF_DONE", substate_name[ctx_isp->substate_activated]);
+ rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
+ return rc;
+}
+
/*
 * EPOCH while in BUBBLE_APPLIED: the reg-update ack was missed, so the
 * re-applied request bubbled again.  Mark it, optionally notify CRM
 * (CRM_KMD_ERR_BUBBLE), move it to the active list, send the SOF
 * timestamp (error status only when this id is newly reported without
 * CRM recovery), and fall back to the BUBBLE substate.
 *
 * Always returns 0.
 */
static int __cam_isp_ctx_epoch_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	CAM_INFO(CAM_ISP, "state = %s IRQ is BUF_DONE", substate_name[ctx_isp->substate_activated]);
	/*
	 * This means we missed the reg upd ack. So we need to
	 * transition to BUBBLE state again.
	 */

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If no pending req in epoch, this is an error case.
		 * Just go back to the bubble state.
		 */
		CAM_ERR(CAM_ISP, "No pending request.");
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;

	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_err) {
		struct cam_req_mgr_error_notify notify;

		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.req_id = req->request_id;
		notify.error = CRM_KMD_ERR_BUBBLE;
		ctx->ctx_crm_intf->notify_err(&notify);
		CAM_DBG(CAM_REQ,
			"Notify CRM about Bubble req_id %llu frame %lld",
			req->request_id, ctx_isp->frame_id);
	} else {
		req_isp->bubble_report = 0;
	}

	/*
	 * Always move the request to active list. Let buf done
	 * function handles the rest.
	 */
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
		req->request_id, ctx_isp->active_req_cnt);
	ctx_isp->active_req_cnt++;
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_ERROR);
		} else
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
end:
	/*
	 * NOTE(review): list_last_entry() on an empty active_req_list
	 * yields a bogus non-NULL pointer, so the "if (req)" guard below
	 * is ineffective on the no-pending-request path — confirm and
	 * guard with list_empty() instead.
	 */
	req = list_last_entry(&ctx->active_req_list, struct cam_ctx_request,
		list);
	if (req)
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
	return 0;
}
+
+/*
+ * Buf-done IRQ received while in the BUBBLE_APPLIED substate.
+ *
+ * Forwards the done event to the common activated-state handler with
+ * bubble_state = 1 so fences of a bubbled request are handled there,
+ * then records the event in the state monitor array.
+ */
+static int __cam_isp_ctx_buf_done_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	CAM_INFO(CAM_ISP, "state = %s IRQ is BUF_DONE", substate_name[ctx_isp->substate_activated]);
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
+	/*
+	 * NOTE(review): this logs req_list's first element's request id,
+	 * not necessarily the request that just completed -- confirm this
+	 * is the intended trigger id for the monitor array.
+	 */
+	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+		CAM_ISP_STATE_CHANGE_TRIGGER_DONE,
+		ctx_isp->base->req_list->request_id);
+	return rc;
+}
+
+/*
+ * Handle a HW error event reported by the ISP hardware.
+ *
+ * Dumps the request most likely responsible (first on the active list, or
+ * on the wait list if the active list is empty), signals error on the
+ * fences of every queued request that does not have bubble recovery
+ * enabled, moves recoverable requests back to the pending list, and
+ * notifies CRM with either a bubble (recoverable) or fatal/overflow error.
+ *
+ * Returns 0 on success, -EFAULT if CRM cannot be notified.
+ */
+static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	uint32_t i = 0;
+	bool found = false;
+	struct cam_ctx_request *req = NULL;
+	struct cam_ctx_request *req_to_report = NULL;
+	struct cam_ctx_request *req_to_dump = NULL;
+	struct cam_ctx_request *req_temp;
+	struct cam_isp_ctx_req *req_isp = NULL;
+	struct cam_isp_ctx_req *req_isp_to_report = NULL;
+	struct cam_req_mgr_error_notify notify;
+	uint64_t error_request_id = 0;
+	struct cam_hw_fence_map_entry *fence_map_out = NULL;
+
+	struct cam_context *ctx = ctx_isp->base;
+	struct cam_isp_hw_error_event_data *error_event_data =
+		(struct cam_isp_hw_error_event_data *)evt_data;
+
+	uint32_t error_type = error_event_data->error_type;
+
+	CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);
+	/*
+	 * Default the error code to fatal so notify.error is never read
+	 * uninitialized below: previously it was left unset when a request
+	 * with bubble_report was found but recovery was not enabled and the
+	 * error was not an overflow.
+	 */
+	notify.error = CRM_KMD_ERR_FATAL;
+	if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
+		(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
+		notify.error = CRM_KMD_ERR_OVERFLOW;
+
+	/*
+	 * The error is likely caused by first request on the active list.
+	 * If active list is empty check wait list (maybe error hit as soon
+	 * as RUP and we handle error before RUP.
+	 */
+	if (list_empty(&ctx->active_req_list)) {
+		CAM_DBG(CAM_ISP,
+			"handling error with no active request");
+		if (list_empty(&ctx->wait_req_list)) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"Error with no active/wait request");
+			goto end;
+		} else {
+			req_to_dump = list_first_entry(&ctx->wait_req_list,
+				struct cam_ctx_request, list);
+		}
+	} else {
+		req_to_dump = list_first_entry(&ctx->active_req_list,
+			struct cam_ctx_request, list);
+	}
+
+	req_isp = (struct cam_isp_ctx_req *) req_to_dump->req_priv;
+	cam_isp_ctx_dump_req(req_isp);
+
+	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+		CAM_ISP_STATE_CHANGE_TRIGGER_ERROR, req_to_dump->request_id);
+
+	/*
+	 * Requests without bubble recovery: signal all their out fences
+	 * with error and recycle them to the free list.
+	 */
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->active_req_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		if (!req_isp->bubble_report) {
+			for (i = 0; i < req_isp->num_fence_map_out; i++) {
+				fence_map_out =
+					&req_isp->fence_map_out[i];
+				CAM_ERR(CAM_ISP, "req %llu, Sync fd %x",
+					req->request_id,
+					req_isp->fence_map_out[i].sync_id);
+				if (req_isp->fence_map_out[i].sync_id != -1) {
+					rc = cam_sync_signal(
+						fence_map_out->sync_id,
+						CAM_SYNC_STATE_SIGNALED_ERROR);
+					fence_map_out->sync_id = -1;
+				}
+			}
+			list_del_init(&req->list);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			ctx_isp->active_req_cnt--;
+		} else {
+			found = true;
+			break;
+		}
+	}
+
+	if (found)
+		goto move_to_pending;
+
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->wait_req_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		if (!req_isp->bubble_report) {
+			for (i = 0; i < req_isp->num_fence_map_out; i++) {
+				fence_map_out =
+					&req_isp->fence_map_out[i];
+				CAM_ERR(CAM_ISP, "req %llu, Sync fd %x",
+					req->request_id,
+					req_isp->fence_map_out[i].sync_id);
+				if (req_isp->fence_map_out[i].sync_id != -1) {
+					rc = cam_sync_signal(
+						fence_map_out->sync_id,
+						CAM_SYNC_STATE_SIGNALED_ERROR);
+					fence_map_out->sync_id = -1;
+				}
+			}
+			list_del_init(&req->list);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			ctx_isp->active_req_cnt--;
+		} else {
+			found = true;
+			break;
+		}
+	}
+
+move_to_pending:
+	/*
+	 * If bubble recovery is enabled on any request we need to move that
+	 * request and all the subsequent requests to the pending list.
+	 * Note:
+	 * We need to traverse the active list in reverse order and add
+	 * to head of pending list.
+	 * e.g. pending current state: 10, 11 | active current state: 8, 9
+	 * intermittent for loop iteration- pending: 9, 10, 11 | active: 8
+	 * final state - pending: 8, 9, 10, 11 | active: NULL
+	 */
+	if (found) {
+		list_for_each_entry_safe_reverse(req, req_temp,
+			&ctx->active_req_list, list) {
+			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+			list_del_init(&req->list);
+			list_add(&req->list, &ctx->pending_req_list);
+			ctx_isp->active_req_cnt--;
+		}
+		list_for_each_entry_safe_reverse(req, req_temp,
+			&ctx->wait_req_list, list) {
+			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+			list_del_init(&req->list);
+			list_add(&req->list, &ctx->pending_req_list);
+			ctx_isp->active_req_cnt--;
+		}
+	}
+
+end:
+	/*
+	 * Walk the (rebuilt) pending list up to the last applied request:
+	 * stop at the first request that wants bubble recovery, otherwise
+	 * signal its fences with error and recycle it.
+	 */
+	do {
+		if (list_empty(&ctx->pending_req_list)) {
+			error_request_id = ctx_isp->last_applied_req_id + 1;
+			req_isp = NULL;
+			break;
+		}
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		error_request_id = ctx_isp->last_applied_req_id;
+
+		if (req_isp->bubble_report) {
+			req_to_report = req;
+			req_isp_to_report = req_to_report->req_priv;
+			break;
+		}
+
+		for (i = 0; i < req_isp->num_fence_map_out; i++) {
+			if (req_isp->fence_map_out[i].sync_id != -1)
+				rc = cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			req_isp->fence_map_out[i].sync_id = -1;
+		}
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+
+	} while (req->request_id < ctx_isp->last_applied_req_id);
+
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = error_request_id;
+
+		/* Report a bubble only when recovery is actually enabled. */
+		if (req_isp_to_report && req_isp_to_report->bubble_report) {
+			if (error_event_data->recovery_enabled)
+				notify.error = CRM_KMD_ERR_BUBBLE;
+		} else {
+			notify.error = CRM_KMD_ERR_FATAL;
+		}
+
+		CAM_WARN(CAM_ISP, "Notify CRM: req %lld, frame %lld\n",
+			error_request_id, ctx_isp->frame_id);
+
+		ctx->ctx_crm_intf->notify_err(&notify);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;
+		CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+	} else {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify ERRROR to CRM");
+		rc = -EFAULT;
+	}
+
+	CAM_DBG(CAM_ISP, "Exit");
+
+	return rc;
+}
+
+/*
+ * Per-substate IRQ dispatch table for the activated context state machine.
+ * The outer index is the activated substate; the irq_ops entries are
+ * indexed by hardware event type, in the order used by the handlers below:
+ * error, SOF, reg update, epoch, EOF, buf done. A NULL slot means the
+ * event is ignored in that substate. Do not reorder entries.
+ */
+static struct cam_isp_ctx_irq_ops
+	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_activated_state,
+			__cam_isp_ctx_reg_upd_in_sof,
+			__cam_isp_ctx_notify_sof_in_activated_state,
+			__cam_isp_ctx_notify_eof_in_activated_state,
+			NULL,
+		},
+	},
+	/* APPLIED */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_activated_state,
+			__cam_isp_ctx_reg_upd_in_activated_state,
+			__cam_isp_ctx_epoch_in_applied,
+			__cam_isp_ctx_notify_eof_in_activated_state,
+			__cam_isp_ctx_buf_done_in_applied,
+		},
+	},
+	/* EPOCH */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_epoch,
+			__cam_isp_ctx_reg_upd_in_epoch_state,
+			__cam_isp_ctx_notify_sof_in_activated_state,
+			__cam_isp_ctx_notify_eof_in_activated_state,
+			__cam_isp_ctx_buf_done_in_epoch,
+		},
+	},
+	/* BUBBLE */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_activated_state,
+			NULL,
+			__cam_isp_ctx_notify_sof_in_activated_state,
+			__cam_isp_ctx_notify_eof_in_activated_state,
+			__cam_isp_ctx_buf_done_in_bubble,
+		},
+	},
+	/* Bubble Applied */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_activated_state,
+			__cam_isp_ctx_reg_upd_in_activated_state,
+			__cam_isp_ctx_epoch_in_bubble_applied,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble_applied,
+		},
+	},
+	/* HW ERROR */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_activated_state,
+			__cam_isp_ctx_reg_upd_in_hw_error,
+			NULL,
+			NULL,
+			NULL,
+		},
+	},
+	/* HALT */
+	{
+	},
+};
+
+/*
+ * Apply a request from CRM while the context is activated.
+ *
+ * Validates that the head of the pending list matches the requested id,
+ * rejects the apply when more than two requests are already outstanding,
+ * programs the hardware with the request's update entries and, on success,
+ * moves the request to the wait list and transitions to @next_state.
+ *
+ * Returns 0 on success, -EFAULT on validation/congestion/config failure.
+ */
+static int __cam_isp_ctx_apply_req_in_activated_state(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
+	uint32_t next_state)
+{
+	int rc = 0;
+	struct cam_ctx_request *req;
+	struct cam_ctx_request *active_req = NULL;
+	struct cam_isp_ctx_req *req_isp;
+	struct cam_isp_ctx_req *active_req_isp;
+	struct cam_isp_context *ctx_isp = NULL;
+	struct cam_hw_config_args cfg;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		CAM_ERR(CAM_ISP, "No available request for Apply id %lld",
+			apply->request_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	/*
+	 * When the pipeline has issue, the requests can be queued up in the
+	 * pipeline. In this case, we should reject the additional request.
+	 * The maximum number of request allowed to be outstanding is 2.
+	 *
+	 */
+	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+
+	spin_lock_bh(&ctx->lock);
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+	spin_unlock_bh(&ctx->lock);
+
+	/*
+	 * Check whether the request id is matching the tip, if not, this means
+	 * we are in the middle of the error handling. Need to reject this apply
+	 */
+	/*
+	 * NOTE(review): req is dereferenced after dropping ctx->lock; this
+	 * assumes apply is serialized against list mutation -- confirm.
+	 */
+	if (req->request_id != apply->request_id) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Invalid Request Id asking %llu existing %llu",
+			apply->request_id, req->request_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CAM_DBG(CAM_REQ, "Apply request %lld in substate %d", req->request_id,
+		ctx_isp->substate_activated);
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	if (ctx_isp->active_req_cnt >= 2) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Reject apply request (id %lld) due to congestion(cnt = %d)",
+			req->request_id,
+			ctx_isp->active_req_cnt);
+
+		spin_lock_bh(&ctx->lock);
+		if (!list_empty(&ctx->active_req_list))
+			active_req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		else
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"WARNING: should not happen (cnt = %d) but active_list empty",
+				ctx_isp->active_req_cnt);
+		spin_unlock_bh(&ctx->lock);
+
+		/* Dump the stalled request's fence state to aid debug. */
+		if (active_req) {
+			active_req_isp =
+				(struct cam_isp_ctx_req *) active_req->req_priv;
+			__cam_isp_ctx_handle_buf_done_fail_log(active_req_isp);
+		}
+
+		rc = -EFAULT;
+		goto end;
+	}
+	req_isp->bubble_report = apply->report_if_bubble;
+
+	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	cfg.request_id = req->request_id;
+	cfg.hw_update_entries = req_isp->cfg;
+	cfg.num_hw_update_entries = req_isp->num_cfg;
+	cfg.priv  = &req_isp->hw_update_data;
+	cfg.init_packet = 0;
+
+	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not apply the configuration");
+	} else {
+		spin_lock_bh(&ctx->lock);
+		ctx_isp->substate_activated = next_state;
+		CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+		ctx_isp->last_applied_req_id = apply->request_id;
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->wait_req_list);
+		CAM_DBG(CAM_ISP, "new substate state %d, applied req %lld",
+			next_state, ctx_isp->last_applied_req_id);
+		spin_unlock_bh(&ctx->lock);
+	}
+end:
+	/*
+	 * NOTE(review): this records req_list's first element's request id
+	 * in the monitor array, not apply->request_id -- confirm intended.
+	 */
+	if (ctx_isp != NULL) {
+		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+			CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
+			ctx->req_list->request_id);
+	}
+	return rc;
+}
+
+/* CRM apply while in the SOF substate: apply and move to APPLIED. */
+static int __cam_isp_ctx_apply_req_in_sof(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	struct cam_isp_context *isp_ctx =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	int rc;
+
+	CAM_DBG(CAM_ISP, "current substate %d",
+		isp_ctx->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CAM_DBG(CAM_ISP, "new substate %d", isp_ctx->substate_activated);
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[isp_ctx->substate_activated]);
+
+	return rc;
+}
+
+/* CRM apply while in the EPOCH substate: apply and move to APPLIED. */
+static int __cam_isp_ctx_apply_req_in_epoch(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	struct cam_isp_context *isp_ctx =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	int rc;
+
+	CAM_DBG(CAM_ISP, "current substate %d",
+		isp_ctx->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CAM_DBG(CAM_ISP, "new substate %d", isp_ctx->substate_activated);
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[isp_ctx->substate_activated]);
+
+	return rc;
+}
+
+/* CRM apply while in BUBBLE: apply and move to BUBBLE_APPLIED. */
+static int __cam_isp_ctx_apply_req_in_bubble(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	struct cam_isp_context *isp_ctx =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	int rc;
+
+	CAM_DBG(CAM_ISP, "current substate %d",
+		isp_ctx->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
+	CAM_DBG(CAM_ISP, "new substate %d", isp_ctx->substate_activated);
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[isp_ctx->substate_activated]);
+
+	return rc;
+}
+
+/*
+ * Flush requests from @req_list.
+ *
+ * For CANCEL_REQ only the request matching flush_req->req_id is moved to
+ * a local flush list; for any other flush type the whole list is moved.
+ * Every flushed request has its remaining out fences signaled with error
+ * and is then recycled to the context free list.
+ *
+ * Caller must hold ctx->lock. Returns 0, or -EINVAL when a CANCEL_REQ is
+ * issued against an empty list.
+ */
+static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
+	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
+{
+	int i, rc = 0;
+	uint32_t cancel_req_id_found = 0;
+	struct cam_ctx_request *req;
+	struct cam_ctx_request *req_temp;
+	struct cam_isp_ctx_req *req_isp;
+	struct list_head flush_list;
+
+	INIT_LIST_HEAD(&flush_list);
+	if (list_empty(req_list)) {
+		CAM_DBG(CAM_ISP, "request list is empty");
+		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+			CAM_ERR(CAM_ISP, "no request to cancel");
+			return -EINVAL;
+		} else
+			return 0;
+	}
+
+	CAM_DBG(CAM_REQ, "Flush [%u] in progress for req_id %llu",
+		flush_req->type, flush_req->req_id);
+	list_for_each_entry_safe(req, req_temp, req_list, list) {
+		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+			if (req->request_id != flush_req->req_id) {
+				continue;
+			} else {
+				list_del_init(&req->list);
+				list_add_tail(&req->list, &flush_list);
+				cancel_req_id_found = 1;
+				break;
+			}
+		}
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &flush_list);
+	}
+
+	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		for (i = 0; i < req_isp->num_fence_map_out; i++) {
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
+					req->request_id,
+					req_isp->fence_map_out[i].sync_id);
+				rc = cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc)
+					CAM_ERR_RATE_LIMIT(CAM_ISP,
+						"signal fence failed\n");
+				req_isp->fence_map_out[i].sync_id = -1;
+			}
+		}
+		/*
+		 * Unlink from flush_list before re-linking into the free
+		 * list; adding without deleting leaves flush_list with
+		 * dangling pointers into recycled nodes.
+		 */
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+		!cancel_req_id_found)
+		CAM_DBG(CAM_ISP,
+			"Flush request id:%lld is not found in the list",
+			flush_req->req_id);
+
+	return 0;
+}
+
+/*
+ * Flush handler for the activated (top) state.
+ *
+ * Always flushes the pending list. For FLUSH_TYPE_ALL it additionally
+ * stops the hardware at a frame boundary, flushes the wait and active
+ * lists, and restarts the hardware.
+ */
+static int __cam_isp_ctx_flush_req_in_top_state(
+	struct cam_context *ctx,
+	struct cam_req_mgr_flush_request *flush_req)
+{
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_isp_stop_args stop_isp;
+	struct cam_hw_stop_args stop_args;
+	struct cam_isp_start_args start_isp;
+	int rc = 0;
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		CAM_INFO(CAM_ISP, "Last request id to flush is %lld",
+			flush_req->req_id);
+		ctx->last_flush_req = flush_req->req_id;
+	}
+
+	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+	spin_unlock_bh(&ctx->lock);
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		/* if active and wait list are empty, return */
+		spin_lock_bh(&ctx->lock);
+		if ((list_empty(&ctx->wait_req_list)) &&
+			(list_empty(&ctx->active_req_list))) {
+			spin_unlock_bh(&ctx->lock);
+			CAM_DBG(CAM_ISP, "active and wait list are empty");
+			goto end;
+		}
+		spin_unlock_bh(&ctx->lock);
+
+		/* Stop hw first before active list flush */
+		/* NOTE(review): hw_stop return value is ignored -- confirm. */
+		stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY;
+		stop_isp.stop_only = true;
+		stop_args.args = (void *)&stop_isp;
+		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+			&stop_args);
+
+		spin_lock_bh(&ctx->lock);
+		CAM_DBG(CAM_ISP, "try to flush wait list");
+		rc = __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list,
+			flush_req);
+		CAM_DBG(CAM_ISP, "try to flush active list");
+		rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
+			flush_req);
+		ctx_isp->active_req_cnt = 0;
+		spin_unlock_bh(&ctx->lock);
+
+		/* Start hw */
+		/*
+		 * NOTE(review): only ctxt_to_hw_map, start_only and
+		 * hw_config.priv are populated; other start_isp fields are
+		 * left as stack garbage -- verify hw_start ignores them.
+		 */
+		start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		start_isp.start_only = true;
+		start_isp.hw_config.priv = NULL;
+
+		rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+			&start_isp);
+	}
+
+end:
+	CAM_DBG(CAM_ISP, "Flush request in top state %d",
+		 ctx->state);
+	return rc;
+}
+
+/*
+ * Flush handler for the READY state: flush the pending list and, when it
+ * drains completely, drop the context back to ACQUIRED.
+ */
+static int __cam_isp_ctx_flush_req_in_ready(
+	struct cam_context *ctx,
+	struct cam_req_mgr_flush_request *flush_req)
+{
+	int rc;
+
+	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+
+	/* With nothing pending, this context reverts to ACQUIRED. */
+	if (list_empty(&ctx->pending_req_list))
+		ctx->state = CAM_CTX_ACQUIRED;
+	spin_unlock_bh(&ctx->lock);
+
+	trace_cam_context_state("ISP", ctx);
+
+	CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
+		ctx->state);
+	return rc;
+}
+
+/*
+ * CRM/ioctl operation table per activated substate. apply_req is only
+ * provided in SOF, EPOCH and BUBBLE; applying in any other substate is
+ * rejected by the absence of an entry. Index order must match the
+ * CAM_ISP_CTX_ACTIVATED_* enum.
+ */
+static struct cam_ctx_ops
+	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_sof,
+		},
+		.irq_ops = NULL,
+	},
+	/* APPLIED */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* EPOCH */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
+		},
+		.irq_ops = NULL,
+	},
+	/* BUBBLE */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
+		},
+		.irq_ops = NULL,
+	},
+	/* Bubble Applied */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HW ERROR */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HALT */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+};
+
+/*
+ * RDI-only SOF in the top (SOF) substate.
+ *
+ * Bumps the frame counter, latches the SOF/boot timestamps, notifies CRM
+ * with a SOF trigger (skipped when more than two requests are already
+ * outstanding, letting the state machine catch up) and emits an idle-frame
+ * SOF event with request id 0.
+ */
+static int __cam_isp_ctx_rdi_only_sof_in_top_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_context *ctx = ctx_isp->base;
+	struct cam_req_mgr_trigger_notify notify;
+	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+	uint64_t request_id = 0;
+
+	if (!evt_data) {
+		/* fixed typo: was "in valid sof event data" */
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	ctx_isp->boot_timestamp = sof_event_data->boot_time;
+
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	/*
+	 * notify reqmgr with sof signal. Note, due to scheduling delay
+	 * we can run into situation that two active requests has already
+	 * be in the active queue while we try to do the notification.
+	 * In this case, we need to skip the current notification. This
+	 * helps the state machine to catch up the delay.
+	 */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
+		ctx_isp->active_req_cnt <= 2) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+		notify.trigger = CAM_TRIGGER_POINT_SOF;
+
+		ctx->ctx_crm_intf->notify_trigger(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
+			ctx_isp->frame_id);
+
+		/*
+		 * It is idle frame with out any applied request id, send
+		 * request id as zero
+		 */
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	} else {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
+	}
+
+	if (list_empty(&ctx->active_req_list))
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	else
+		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
+
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+	CAM_DBG(CAM_ISP, "next substate %d",
+		ctx_isp->substate_activated);
+	return rc;
+}
+
+/*
+ * RDI-only SOF while a request is APPLIED.
+ *
+ * Latches frame id and timestamps, then moves to BUBBLE_APPLIED because
+ * the reg update ack for the applied request has not arrived by this SOF.
+ */
+static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+
+	if (!evt_data) {
+		/* fixed typo: was "in valid sof event data" */
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	ctx_isp->boot_timestamp = sof_event_data->boot_time;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+	return 0;
+}
+
+/*
+ * RDI-only SOF in BUBBLE_APPLIED: the reg update ack was missed, so mark
+ * the waiting request as bubbled, optionally notify CRM of the bubble,
+ * move the request to the active list and fall back to the BUBBLE state.
+ */
+static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_ctx_request *req;
+	struct cam_isp_ctx_req *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+	uint64_t request_id = 0;
+
+	/*
+	 * Guard against a NULL event payload before dereferencing it below;
+	 * every sibling SOF handler performs this check.
+	 */
+	if (!evt_data) {
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+		return -EINVAL;
+	}
+
+	/*
+	 * Sof in bubble applied state means, reg update not received.
+	 * before increment frame id and override time stamp value, send
+	 * the previous sof time stamp that got captured in the
+	 * sof in applied state.
+	 */
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	ctx_isp->boot_timestamp = sof_event_data->boot_time;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	if (list_empty(&ctx->wait_req_list)) {
+		/*
+		 * If no pending req in epoch, this is an error case.
+		 * The recovery is to go back to sof state
+		 */
+		CAM_ERR(CAM_ISP, "No wait request");
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+		CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+
+		/* Send SOF event as empty frame*/
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+	req_isp->bubble_detected = true;
+
+	CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
+			ctx_isp->frame_id);
+	} else {
+		req_isp->bubble_report = 0;
+	}
+
+	/*
+	 * Always move the request to active list. Let buf done
+	 * function handles the rest.
+	 */
+	ctx_isp->active_req_cnt++;
+	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
+	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+			req->request_id, ctx_isp->active_req_cnt);
+
+	if (!req_isp->bubble_report) {
+		if (req->request_id > ctx_isp->reported_req_id) {
+			request_id = req->request_id;
+			ctx_isp->reported_req_id = request_id;
+			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_ERROR);
+		} else
+			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	} else
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	/* change the state to bubble, as reg update has not come */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+end:
+	return 0;
+}
+
+/*
+ * RDI-only SOF while in BUBBLE: signal error on all active requests,
+ * recycle them to the free list, notify CRM with a SOF trigger and an
+ * idle-frame (request id 0) SOF event, then return to the SOF substate.
+ */
+static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	uint32_t i;
+	struct cam_ctx_request *req;
+	struct cam_context *ctx = ctx_isp->base;
+	struct cam_req_mgr_trigger_notify notify;
+	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+	struct cam_isp_ctx_req *req_isp;
+	uint64_t request_id = 0;
+
+	if (!evt_data) {
+		/* fixed typo: was "in valid sof event data" */
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	ctx_isp->boot_timestamp = sof_event_data->boot_time;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+	/*
+	 * Signal all active requests with error and move the  all the active
+	 * requests to free list
+	 */
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
+			req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+		ctx_isp->active_req_cnt--;
+	}
+
+	/* notify reqmgr with sof signal */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+		notify.trigger = CAM_TRIGGER_POINT_SOF;
+
+		ctx->ctx_crm_intf->notify_trigger(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
+			ctx_isp->frame_id);
+
+	} else {
+		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+	}
+
+	/*
+	 * It is idle frame with out any applied request id, send
+	 * request id as zero
+	 */
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+
+	CAM_DBG(CAM_ISP, "next substate %d",
+		ctx_isp->substate_activated);
+
+	return 0;
+}
+
+/*
+ * RDI-only reg update ack while in BUBBLE_APPLIED.
+ *
+ * Transitions to EPOCH, moves the head of the wait list to the active
+ * list (or to the free list when it has no out fences, i.e. it is already
+ * complete), notifies CRM with a SOF trigger and reports the applied
+ * request id in the SOF event. If no request is waiting, falls back to
+ * the SOF substate and reports an idle frame.
+ */
+static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_ctx_request  *req;
+	struct cam_context      *ctx = ctx_isp->base;
+	struct cam_isp_ctx_req  *req_isp;
+	struct cam_req_mgr_trigger_notify  notify;
+	uint64_t  request_id  = 0;
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+	/* notify reqmgr with sof signal*/
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) {
+		if (list_empty(&ctx->wait_req_list)) {
+			CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
+			goto error;
+		}
+		req = list_first_entry(&ctx->wait_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+
+		/* INIT packets are internal: report their id as 0 to CRM. */
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		request_id =
+			(req_isp->hw_update_data.packet_opcode_type ==
+				CAM_ISP_PACKET_INIT_DEV) ?
+			0 : req->request_id;
+
+		if (req_isp->num_fence_map_out != 0) {
+			list_add_tail(&req->list, &ctx->active_req_list);
+			ctx_isp->active_req_cnt++;
+			CAM_DBG(CAM_ISP,
+				"move request %lld to active list(cnt = %d)",
+				req->request_id, ctx_isp->active_req_cnt);
+			/* if packet has buffers, set correct request id */
+			request_id = req->request_id;
+		} else {
+			/* no io config, so the request is completed. */
+			list_add_tail(&req->list, &ctx->free_req_list);
+			CAM_DBG(CAM_ISP,
+				"move active req %lld to free list(cnt=%d)",
+				req->request_id, ctx_isp->active_req_cnt);
+		}
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+		notify.trigger = CAM_TRIGGER_POINT_SOF;
+
+		ctx->ctx_crm_intf->notify_trigger(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
+			ctx_isp->frame_id);
+	} else {
+		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+	}
+	if (request_id)
+		ctx_isp->reported_req_id = request_id;
+
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+	return 0;
+error:
+	/* Send SOF event as idle frame*/
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	/*
+	 * There is no request in the pending list, move the sub state machine
+	 * to SOF sub state
+	 */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+
+	return 0;
+}
+
+/*
+ * Per-substate IRQ dispatch table for RDI-only contexts. Same event
+ * ordering as the regular activated table (error, SOF, reg update,
+ * epoch, EOF, buf done); a NULL slot means the event is ignored in that
+ * substate. Do not reorder entries.
+ */
+static struct cam_isp_ctx_irq_ops
+	cam_isp_ctx_rdi_only_activated_state_machine_irq
+	[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_rdi_only_sof_in_top_state,
+			__cam_isp_ctx_reg_upd_in_sof,
+			NULL,
+			NULL,
+			NULL,
+		},
+	},
+	/* APPLIED */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_applied_state,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_applied,
+		},
+	},
+	/* EPOCH */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_top_state,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_epoch,
+		},
+	},
+	/* BUBBLE*/
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_bubble_state,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble,
+		},
+	},
+	/* BUBBLE APPLIED ie PRE_BUBBLE */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_bubble_applied,
+			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble_applied,
+		},
+	},
+	/* HW ERROR */
+	{
+	},
+	/* HALT */
+	{
+	},
+};
+
+/* RDI-only CRM apply in the top state: apply and move to APPLIED. */
+static int __cam_isp_ctx_rdi_only_apply_req_top_state(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	struct cam_isp_context *isp_ctx =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	int rc;
+
+	CAM_DBG(CAM_ISP, "current substate %d",
+		isp_ctx->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CAM_DBG(CAM_ISP, "new substate %d", isp_ctx->substate_activated);
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[isp_ctx->substate_activated]);
+
+	return rc;
+}
+
+/*
+ * CRM/ioctl operation table for RDI-only activated substates. apply_req
+ * is provided only in the SOF and EPOCH slots. Index order must match
+ * the CAM_ISP_CTX_ACTIVATED_* enum.
+ */
+static struct cam_ctx_ops
+	cam_isp_ctx_rdi_only_activated_state_machine
+		[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
+		},
+		.irq_ops = NULL,
+	},
+	/* APPLIED */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* EPOCH */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
+		},
+		.irq_ops = NULL,
+	},
+	/* PRE BUBBLE */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* BUBBLE */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HW ERROR */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HALT */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+};
+
+/* top level state machine */
+static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc = 0;
+ struct cam_hw_release_args rel_arg;
+ struct cam_isp_context *ctx_isp =
+ (struct cam_isp_context *) ctx->ctx_priv;
+ struct cam_req_mgr_flush_request flush_req;
+
+ if (ctx->link_hdl != -1) {
+ CAM_ERR(CAM_ISP, "ctx expects release dev after unlink");
+ rc = -EAGAIN;
+ return rc;
+ }
+
+ if (cmd && ctx_isp->hw_ctx && ctx_isp->split_acquire) {
+ CAM_ERR(CAM_ISP, "ctx expects release HW before release dev");
+ return rc;
+ }
+
+ if (ctx_isp->hw_ctx) {
+ rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+ ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+ &rel_arg);
+ ctx_isp->hw_ctx = NULL;
+ }
+
+ ctx->session_hdl = -1;
+ ctx->dev_hdl = -1;
+ ctx->link_hdl = -1;
+ ctx->ctx_crm_intf = NULL;
+ ctx_isp->frame_id = 0;
+ ctx_isp->active_req_cnt = 0;
+ ctx_isp->reported_req_id = 0;
+ ctx_isp->hw_acquired = false;
+ ctx_isp->init_received = false;
+
+ /*
+ * Ideally, we should never have any active request here.
+ * But we still add some sanity check code here to help the debug
+ */
+ if (!list_empty(&ctx->active_req_list))
+ CAM_ERR(CAM_ISP, "Active list is not empty");
+
+ /* Flush all the pending request list */
+ flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
+ flush_req.link_hdl = ctx->link_hdl;
+ flush_req.dev_hdl = ctx->dev_hdl;
+
+ CAM_DBG(CAM_ISP, "try to flush pending list");
+ spin_lock_bh(&ctx->lock);
+ rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
+ spin_unlock_bh(&ctx->lock);
+ ctx->state = CAM_CTX_AVAILABLE;
+
+ trace_cam_context_state("ISP", ctx);
+ CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
+ ctx->ctx_id, ctx->state);
+ return rc;
+}
+
+/*
+ * RELEASE_HW handler (Acquired/Ready top states): releases the ISP HW
+ * mapping, resets per-stream counters, flushes pending requests and
+ * drops the context back to CAM_CTX_ACQUIRED (the dev handle survives).
+ */
+static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
+	void *cmd)
+{
+	int rc = 0;
+	struct cam_hw_release_args rel_arg;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_req_mgr_flush_request flush_req;
+
+	if (ctx_isp->hw_ctx) {
+		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&rel_arg);
+		ctx_isp->hw_ctx = NULL;
+	} else {
+		CAM_ERR(CAM_ISP, "No hw resources acquired for this ctx");
+	}
+
+	ctx_isp->frame_id = 0;
+	ctx_isp->active_req_cnt = 0;
+	ctx_isp->reported_req_id = 0;
+	ctx_isp->hw_acquired = false;
+	ctx_isp->init_received = false;
+
+	/*
+	 * Ideally, we should never have any active request here.
+	 * But we still add some sanity check code here to help the debug
+	 */
+	if (!list_empty(&ctx->active_req_list))
+		CAM_WARN(CAM_ISP, "Active list is not empty");
+
+	/* Flush all the pending request list */
+	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
+	flush_req.link_hdl = ctx->link_hdl;
+	flush_req.dev_hdl = ctx->dev_hdl;
+
+	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
+	spin_unlock_bh(&ctx->lock);
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	trace_cam_context_state("ISP", ctx);
+	/* NOTE(review): log says "Release device" in the release-hw path */
+	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
+		ctx->ctx_id, ctx->state);
+	return rc;
+}
+
+/*
+ * CONFIG_DEV handler: maps the user packet, lets the HW manager
+ * prepare the update entries, takes refs on the output fences and
+ * either queues an INIT packet locally or hands an update packet to
+ * the CRM via add_req.
+ *
+ * On any failure the fence refs are dropped and the request object is
+ * returned to the free list.
+ */
+static int __cam_isp_ctx_config_dev_in_top_state(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0, i;
+	struct cam_ctx_request *req = NULL;
+	struct cam_isp_ctx_req *req_isp;
+	uintptr_t packet_addr;
+	struct cam_packet *packet;
+	size_t len = 0;
+	struct cam_hw_prepare_update_args cfg;
+	struct cam_req_mgr_add_request add_req;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CAM_DBG(CAM_ISP, "get free request object......");
+
+	/* get free request */
+	spin_lock_bh(&ctx->lock);
+	if (!list_empty(&ctx->free_req_list)) {
+		req = list_first_entry(&ctx->free_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+	}
+	spin_unlock_bh(&ctx->lock);
+
+	if (!req) {
+		CAM_ERR(CAM_ISP, "No more request obj free");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	/* for config dev, only memory handle is supported */
+	/* map packet from the memhandle */
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		&packet_addr, &len);
+	if (rc != 0) {
+		CAM_ERR(CAM_ISP, "Can not get packet address");
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	/*
+	 * NOTE(review): cmd->offset is user-controlled and is not checked
+	 * against the mapped length 'len' here — confirm bounds are
+	 * validated upstream or in hw_prepare_update.
+	 */
+	packet = (struct cam_packet *)(packet_addr + (uint32_t)cmd->offset);
+	CAM_DBG(CAM_ISP, "pack_handle %llx", cmd->packet_handle);
+	CAM_DBG(CAM_ISP, "packet address is 0x%zx", packet_addr);
+	CAM_DBG(CAM_ISP, "packet with length %zu, offset 0x%llx",
+		len, cmd->offset);
+	CAM_DBG(CAM_ISP, "Packet request id %lld",
+		packet->header.request_id);
+	CAM_DBG(CAM_ISP, "Packet size 0x%x", packet->header.size);
+	CAM_DBG(CAM_ISP, "packet op %d", packet->header.op_code);
+
+	/* reject requests that were already covered by a flush */
+	if (packet->header.request_id <= ctx->last_flush_req) {
+		CAM_INFO(CAM_ISP,
+			"request %lld has been flushed, reject packet",
+			packet->header.request_id);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	/* first request past the flush point clears the flush watermark */
+	if (packet->header.request_id > ctx->last_flush_req)
+		ctx->last_flush_req = 0;
+
+	/* preprocess the configuration */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	cfg.max_hw_update_entries = CAM_ISP_CTX_CFG_MAX;
+	cfg.hw_update_entries = req_isp->cfg;
+	cfg.max_out_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.max_in_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.out_map_entries = req_isp->fence_map_out;
+	cfg.in_map_entries = req_isp->fence_map_in;
+	cfg.priv  = &req_isp->hw_update_data;
+	cfg.pf_data = &(req->pf_data);
+
+	CAM_DBG(CAM_ISP, "try to prepare config packet......");
+
+	rc = ctx->hw_mgr_intf->hw_prepare_update(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc != 0) {
+		CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
+		rc = -EFAULT;
+		goto free_req;
+	}
+	req_isp->num_cfg = cfg.num_hw_update_entries;
+	req_isp->num_fence_map_out = cfg.num_out_map_entries;
+	req_isp->num_fence_map_in = cfg.num_in_map_entries;
+	req_isp->num_acked = 0;
+	req_isp->bubble_detected = false;
+
+	/* hold a ref on each output fence until buf-done signals it */
+	for (i = 0; i < req_isp->num_fence_map_out; i++) {
+		rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can't get ref for fence %d",
+				req_isp->fence_map_out[i].sync_id);
+			goto put_ref;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "num_entry: %d, num fence out: %d, num fence in: %d",
+		req_isp->num_cfg, req_isp->num_fence_map_out,
+		req_isp->num_fence_map_in);
+
+	req->request_id = packet->header.request_id;
+	req->status = 1;
+
+	CAM_DBG(CAM_ISP, "Packet request id %lld packet opcode:%d",
+		packet->header.request_id,
+		req_isp->hw_update_data.packet_opcode_type);
+
+	if (req_isp->hw_update_data.packet_opcode_type ==
+		CAM_ISP_PACKET_INIT_DEV) {
+		/* INIT packets are queued locally, never sent to the CRM */
+		if (ctx->state < CAM_CTX_ACTIVATED) {
+			rc = __cam_isp_ctx_enqueue_init_request(ctx, req);
+			if (rc)
+				CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed");
+			ctx_isp->init_received = true;
+		} else {
+			rc = -EINVAL;
+			CAM_ERR(CAM_ISP, "Recevied INIT pkt in wrong state");
+		}
+	} else {
+		/* update packets must be registered with the CRM first */
+		if (ctx->state >= CAM_CTX_READY && ctx->ctx_crm_intf->add_req) {
+			add_req.link_hdl = ctx->link_hdl;
+			add_req.dev_hdl  = ctx->dev_hdl;
+			add_req.req_id   = req->request_id;
+			add_req.skip_before_applying = 0;
+			rc = ctx->ctx_crm_intf->add_req(&add_req);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "Add req failed: req id=%llu",
+					req->request_id);
+			} else {
+				__cam_isp_ctx_enqueue_request_in_order(
+					ctx, req);
+			}
+		} else {
+			rc = -EINVAL;
+			CAM_ERR(CAM_ISP, "Recevied Update in wrong state");
+		}
+	}
+	if (rc)
+		goto put_ref;
+
+	CAM_DBG(CAM_REQ,
+		"Preprocessing Config req_id %lld successful on ctx %u",
+		req->request_id, ctx->ctx_id);
+
+	return rc;
+
+put_ref:
+	/* drop refs taken above; i is one past the last successful get */
+	for (--i; i >= 0; i--) {
+		rc = cam_sync_put_obj_ref(req_isp->fence_map_out[i].sync_id);
+		if (rc)
+			CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
+				req_isp->fence_map_out[i].sync_id);
+	}
+free_req:
+	spin_lock_bh(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	spin_unlock_bh(&ctx->lock);
+end:
+	return rc;
+}
+
+/*
+ * ACQUIRE_DEV handler (Available state). Two modes:
+ *  - split acquire (num_resources == CAM_API_COMPAT_CONSTANT): only a
+ *    device handle is created; HW is acquired later via ACQUIRE_HW;
+ *  - legacy acquire: resources are copied from user space, HW is
+ *    reserved and the RDI-only vs. default substate machine selected.
+ * On success the context moves to CAM_CTX_ACQUIRED.
+ */
+static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_acquire_args param;
+	struct cam_isp_resource *isp_res = NULL;
+	struct cam_create_dev_hdl req_hdl_param;
+	struct cam_hw_release_args release;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_hw_cmd_args hw_cmd_args;
+	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_ISP, "HW interface is not ready");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP,
+		"session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
+		cmd->session_handle, cmd->num_resources,
+		cmd->handle_type, cmd->resource_hdl);
+
+	/* sentinel value selects the split (dev-handle-only) acquire */
+	if (cmd->num_resources == CAM_API_COMPAT_CONSTANT) {
+		ctx_isp->split_acquire = true;
+		CAM_DBG(CAM_ISP, "Acquire dev handle");
+		goto get_dev_handle;
+	}
+
+	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
+		CAM_ERR(CAM_ISP, "Too much resources in the acquire");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1)  {
+		CAM_ERR(CAM_ISP, "Only user pointer is supported");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	isp_res = kzalloc(
+		sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
+	if (!isp_res) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "start copy %d resources from user",
+		 cmd->num_resources);
+
+	if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl),
+		sizeof(*isp_res)*cmd->num_resources)) {
+		rc = -EFAULT;
+		goto free_res;
+	}
+
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = cmd->num_resources;
+	param.acquire_info = (uintptr_t) isp_res;
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		CAM_ERR(CAM_ISP, "Acquire device failed");
+		goto free_res;
+	}
+
+	/* Query the context has rdi only resource */
+	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+				&hw_cmd_args);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "HW command failed");
+		goto free_hw;
+	}
+
+	if (isp_hw_cmd_args.u.is_rdi_only_context) {
+		/*
+		 * this context has rdi only resource assign rdi only
+		 * state machine
+		 */
+		CAM_DBG(CAM_ISP, "RDI only session Context");
+
+		ctx_isp->substate_machine_irq =
+			cam_isp_ctx_rdi_only_activated_state_machine_irq;
+		ctx_isp->substate_machine =
+			cam_isp_ctx_rdi_only_activated_state_machine;
+	} else {
+		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
+		ctx_isp->substate_machine_irq =
+			cam_isp_ctx_activated_state_machine_irq;
+		ctx_isp->substate_machine =
+			cam_isp_ctx_activated_state_machine;
+	}
+
+	ctx_isp->rdi_only_context = isp_hw_cmd_args.u.is_rdi_only_context;
+	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
+	ctx_isp->hw_acquired = true;
+	ctx_isp->split_acquire = false;
+	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
+
+	kfree(isp_res);
+	isp_res = NULL;
+
+get_dev_handle:
+
+	req_hdl_param.session_hdl = cmd->session_handle;
+	/* bridge is not ready for these flags. so false for now */
+	req_hdl_param.v4l2_sub_dev_flag = 0;
+	req_hdl_param.media_entity_flag = 0;
+	req_hdl_param.ops = ctx->crm_ctx_intf;
+	req_hdl_param.priv = ctx;
+
+	CAM_DBG(CAM_ISP, "get device handle form bridge");
+	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
+	if (ctx->dev_hdl <= 0) {
+		rc = -EFAULT;
+		CAM_ERR(CAM_ISP, "Can not create device handle");
+		goto free_hw;
+	}
+	cmd->dev_handle = ctx->dev_hdl;
+
+	/* store session information */
+	ctx->session_hdl = cmd->session_handle;
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	trace_cam_context_state("ISP", ctx);
+	CAM_DBG(CAM_ISP,
+		"Acquire success on session_hdl 0x%x num_rsrces %d ctx %u",
+		cmd->session_handle, cmd->num_resources, ctx->ctx_id);
+
+	return rc;
+
+free_hw:
+	/* hw_acquired guards the split-acquire path, where no HW is held */
+	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	if (ctx_isp->hw_acquired)
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&release);
+	ctx_isp->hw_ctx = NULL;
+	ctx_isp->hw_acquired = false;
+free_res:
+	kfree(isp_res);
+end:
+	return rc;
+}
+
+/*
+ * ACQUIRE_HW (API version 1) handler for the split-acquire flow.
+ *
+ * Copies the acquire payload from user space, reserves ISP HW through
+ * the HW manager and selects the RDI-only or default substate machine.
+ *
+ * Returns 0 on success or a negative errno; on failure any acquired HW
+ * and the temporary payload buffer are released.
+ */
+static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
+	void *args)
+{
+	int rc = 0;
+	struct cam_acquire_hw_cmd_v1 *cmd =
+		(struct cam_acquire_hw_cmd_v1 *)args;
+	struct cam_hw_acquire_args param;
+	struct cam_hw_release_args release;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_hw_cmd_args hw_cmd_args;
+	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
+	struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_ISP, "HW interface is not ready");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP,
+		"session_hdl 0x%x, hdl type %d, res %lld",
+		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1)  {
+		CAM_ERR(CAM_ISP, "Only user pointer is supported");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/*
+	 * Fix: reject an undersized payload with -EINVAL; previously this
+	 * path returned 0 (success) to user space.
+	 * NOTE(review): there is no upper bound on the user-supplied
+	 * data_size before kzalloc — confirm a cap is enforced upstream.
+	 */
+	if (cmd->data_size < sizeof(*acquire_hw_info)) {
+		CAM_ERR(CAM_ISP, "data_size is not a valid value");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
+	if (!acquire_hw_info) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "start copy resources from user");
+
+	/* use u64_to_user_ptr for consistency with the acquire_dev path */
+	if (copy_from_user(acquire_hw_info, u64_to_user_ptr(cmd->resource_hdl),
+		cmd->data_size)) {
+		rc = -EFAULT;
+		goto free_res;
+	}
+
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = CAM_API_COMPAT_CONSTANT;
+	param.acquire_info_size = cmd->data_size;
+	param.acquire_info = (uint64_t) acquire_hw_info;
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		CAM_ERR(CAM_ISP, "Acquire device failed");
+		goto free_res;
+	}
+
+	/* Query the context has rdi only resource */
+	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+				&hw_cmd_args);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "HW command failed");
+		goto free_hw;
+	}
+
+	if (isp_hw_cmd_args.u.is_rdi_only_context) {
+		/*
+		 * this context has rdi only resource assign rdi only
+		 * state machine
+		 */
+		CAM_DBG(CAM_ISP, "RDI only session Context");
+
+		ctx_isp->substate_machine_irq =
+			cam_isp_ctx_rdi_only_activated_state_machine_irq;
+		ctx_isp->substate_machine =
+			cam_isp_ctx_rdi_only_activated_state_machine;
+	} else {
+		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
+		ctx_isp->substate_machine_irq =
+			cam_isp_ctx_activated_state_machine_irq;
+		ctx_isp->substate_machine =
+			cam_isp_ctx_activated_state_machine;
+	}
+
+	ctx_isp->rdi_only_context = isp_hw_cmd_args.u.is_rdi_only_context;
+	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
+	ctx_isp->hw_acquired = true;
+	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
+
+	trace_cam_context_state("ISP", ctx);
+	CAM_DBG(CAM_ISP,
+		"Acquire success on session_hdl 0x%xs RDI only %d ctx %u",
+		ctx->session_hdl,
+		(isp_hw_cmd_args.u.is_rdi_only_context ? 1 : 0), ctx->ctx_id);
+	kfree(acquire_hw_info);
+	return rc;
+
+free_hw:
+	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
+	ctx_isp->hw_ctx = NULL;
+	ctx_isp->hw_acquired = false;
+free_res:
+	kfree(acquire_hw_info);
+end:
+	return rc;
+}
+
+/*
+ * Dispatch ACQUIRE_HW by the API version carried in the first u32 of
+ * the user payload. Currently only version 1 is supported.
+ */
+static int __cam_isp_ctx_acquire_hw_in_acquired(struct cam_context *ctx,
+	void *args)
+{
+	uint32_t api_version;
+	int rc = -EINVAL;
+
+	if (!ctx || !args) {
+		CAM_ERR(CAM_ISP, "Invalid input pointer");
+		return rc;
+	}
+
+	api_version = *((uint32_t *)args);
+	switch (api_version) {
+	case 1:
+		rc = __cam_isp_ctx_acquire_hw_v1(ctx, args);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Unsupported api version %d", api_version);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * CONFIG_DEV handler in Acquired state: requires HW to be acquired,
+ * then defers to the top-state handler. If the context is already
+ * linked, a successful config promotes it to CAM_CTX_READY.
+ */
+static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (!ctx_isp->hw_acquired) {
+		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
+		return -EINVAL;
+	}
+
+	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
+
+	if (!rc && (ctx->link_hdl >= 0)) {
+		ctx->state = CAM_CTX_READY;
+		trace_cam_context_state("ISP", ctx);
+	}
+
+	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
+	return rc;
+}
+
+/*
+ * CRM link callback in Acquired state: records the link handle and CRM
+ * callback table. If an INIT packet was already configured, the
+ * context is promoted to CAM_CTX_READY.
+ */
+static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CAM_DBG(CAM_ISP, "Enter.........");
+
+	ctx->link_hdl = link->link_hdl;
+	ctx->ctx_crm_intf = link->crm_cb;
+	ctx_isp->subscribe_event = link->subscribe_event;
+
+	/* change state only if we had the init config */
+	if (ctx_isp->init_received) {
+		ctx->state = CAM_CTX_READY;
+		trace_cam_context_state("ISP", ctx);
+	}
+
+	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
+
+	return rc;
+}
+
+/*
+ * CRM unlink callback in Acquired state: drop the link bookkeeping;
+ * the top state itself is unchanged.
+ */
+static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	ctx->ctx_crm_intf = NULL;
+	ctx->link_hdl = -1;
+
+	return 0;
+}
+
+/*
+ * CRM get_dev_info callback: reports this device as an IFE with a
+ * pipeline delay of 1 frame, triggered at SOF.
+ */
+static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_device_info *dev_info)
+{
+	int rc = 0;
+
+	dev_info->dev_hdl = ctx->dev_hdl;
+	strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
+	dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
+	dev_info->p_delay = 1;
+	dev_info->trigger = CAM_TRIGGER_POINT_SOF;
+
+	return rc;
+}
+
+/*
+ * START_DEV handler (Ready state): submits the first (INIT) pending
+ * request to the HW manager and transitions to CAM_CTX_ACTIVATED.
+ * The state is switched *before* hw_start because the HW tasklet can
+ * deliver IRQs for this context before hw_start returns.
+ */
+static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_isp_start_args start_isp;
+	struct cam_ctx_request *req;
+	struct cam_isp_ctx_req *req_isp;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	/* handles must match the session this context belongs to */
+	if (cmd->session_handle != ctx->session_hdl ||
+		cmd->dev_handle != ctx->dev_hdl) {
+		rc = -EPERM;
+		goto end;
+	}
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/* should never happen */
+		CAM_ERR(CAM_ISP, "Start device with empty configuration");
+		rc = -EFAULT;
+		goto end;
+	} else {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+	}
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	if (!ctx_isp->hw_ctx) {
+		CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	start_isp.hw_config.request_id = req->request_id;
+	start_isp.hw_config.hw_update_entries = req_isp->cfg;
+	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
+	start_isp.hw_config.priv  = &req_isp->hw_update_data;
+	start_isp.hw_config.init_packet = 1;
+	start_isp.start_only = false;
+
+	ctx_isp->frame_id = 0;
+	ctx_isp->active_req_cnt = 0;
+	ctx_isp->reported_req_id = 0;
+	/*
+	 * RDI-only contexts start in APPLIED; otherwise start in EPOCH
+	 * when the INIT request has output fences, else in SOF.
+	 */
+	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
+		CAM_ISP_CTX_ACTIVATED_APPLIED :
+		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
+		CAM_ISP_CTX_ACTIVATED_SOF;
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+
+	/*
+	 * Only place to change state before calling the hw due to
+	 * hardware tasklet has higher priority that can cause the
+	 * irq handling comes early
+	 */
+	ctx->state = CAM_CTX_ACTIVATED;
+	trace_cam_context_state("ISP", ctx);
+	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+		&start_isp);
+	if (rc) {
+		/* HW failure. user need to clean up the resource */
+		CAM_ERR(CAM_ISP, "Start HW failed");
+		ctx->state = CAM_CTX_READY;
+		trace_cam_context_state("ISP", ctx);
+		goto end;
+	}
+	CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id);
+
+	list_del_init(&req->list);
+
+	/* requests with output fences wait on buf-done in the active list */
+	if (req_isp->num_fence_map_out) {
+		list_add_tail(&req->list, &ctx->active_req_list);
+		ctx_isp->active_req_cnt++;
+	} else {
+		list_add_tail(&req->list, &ctx->wait_req_list);
+	}
+end:
+	return rc;
+}
+
+/*
+ * CRM unlink callback in Ready state: drop link bookkeeping and fall
+ * back to CAM_CTX_ACQUIRED (the HW and dev handle stay acquired).
+ */
+static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	ctx->link_hdl = -1;
+	ctx->ctx_crm_intf = NULL;
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("ISP", ctx);
+
+	return rc;
+}
+
+/*
+ * Common stop path for the Activated state. Masks further IRQ handling
+ * by forcing the HALT substate, stops the HW (at frame boundary for an
+ * explicit STOP_DEV, immediately otherwise), then signals every
+ * outstanding output fence with an error and recycles all requests
+ * from the pending, wait and active lists back to the free list.
+ * The caller is responsible for the top-state transition.
+ */
+static int __cam_isp_ctx_stop_dev_in_activated_unlock(
+	struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
+{
+	int rc = 0;
+	uint32_t i;
+	struct cam_hw_stop_args stop;
+	struct cam_ctx_request *req;
+	struct cam_isp_ctx_req *req_isp;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_isp_stop_args stop_isp;
+
+	/* Mask off all the incoming hardware events */
+	spin_lock_bh(&ctx->lock);
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
+	spin_unlock_bh(&ctx->lock);
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+	CAM_INFO(CAM_ISP, "state changed to %s", substate_name[ctx_isp->substate_activated]);
+
+	/* stop hw first */
+	if (ctx_isp->hw_ctx) {
+		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
+
+		/* explicit stop drains the current frame; NULL = immediate */
+		if (stop_cmd)
+			stop_isp.hw_stop_cmd =
+				CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY;
+		else
+			stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
+
+		stop_isp.stop_only = false;
+		stop.args = (void *) &stop_isp;
+		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+			&stop);
+	}
+
+	/* error-signal and recycle every request still pending */
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
+			 req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	/* same for requests applied but not yet active */
+	while (!list_empty(&ctx->wait_req_list)) {
+		req = list_first_entry(&ctx->wait_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CAM_DBG(CAM_ISP, "signal fence in wait list. fence num %d",
+			 req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	/* and for requests already programmed into the HW */
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
+			 req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+	ctx_isp->frame_id = 0;
+	ctx_isp->active_req_cnt = 0;
+	ctx_isp->reported_req_id = 0;
+
+	CAM_DBG(CAM_ISP, "Stop device success next state %d on ctx %u",
+		ctx->state, ctx->ctx_id);
+	return rc;
+}
+
+/*
+ * STOP_DEV handler (Activated state): stops HW and flushes requests,
+ * then drops to CAM_CTX_ACQUIRED.
+ * NOTE(review): the return value of the unlock helper is discarded;
+ * rc here is always 0 (helper currently never fails).
+ */
+static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *)ctx->ctx_priv;
+
+	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx, cmd);
+	ctx_isp->init_received = false;
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("ISP", ctx);
+	return rc;
+}
+
+/*
+ * RELEASE_DEV in Activated state: stop the HW immediately first, then
+ * run the normal top-state release. Errors are logged but the release
+ * proceeds regardless (last rc wins).
+ */
+static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);
+
+	rc = __cam_isp_ctx_release_dev_in_top_state(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Release device failed rc=%d", rc);
+
+	return rc;
+}
+
+/*
+ * RELEASE_HW in Activated state: stop the HW immediately, then release
+ * it via the top-state handler. Errors are logged; the last rc is
+ * returned.
+ */
+static int __cam_isp_ctx_release_hw_in_activated(struct cam_context *ctx,
+	void *cmd)
+{
+	int rc = 0;
+
+	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);
+
+	rc = __cam_isp_ctx_release_hw_in_top_state(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Release hw failed rc=%d", rc);
+
+	return rc;
+}
+
+/*
+ * Forward a CRM link-pause event to the HW manager as an internal
+ * PAUSE_HW command for this context's HW mapping.
+ */
+static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args   hw_cmd_args;
+	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
+	struct cam_isp_context  *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+		&hw_cmd_args);
+
+	return rc;
+}
+
+/*
+ * Forward a CRM link-resume event to the HW manager as an internal
+ * RESUME_HW command; counterpart of __cam_isp_ctx_link_pause().
+ */
+static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args   hw_cmd_args;
+	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
+	struct cam_isp_context  *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+		&hw_cmd_args);
+
+	return rc;
+}
+
+/*
+ * SOF-freeze event from the CRM: ask the HW manager to enable SOF IRQ
+ * debugging so the stalled frame source can be diagnosed.
+ */
+static int __cam_isp_ctx_handle_sof_freeze_evt(
+	struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args   hw_cmd_args;
+	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
+	struct cam_isp_context  *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
+	isp_hw_cmd_args.u.sof_irq_enable = 1;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+		&hw_cmd_args);
+
+	return rc;
+}
+
+/*
+ * CRM link-event dispatcher: maps each event type to its helper.
+ * Helper return codes are intentionally dropped; this always returns 0.
+ */
+static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *link_evt_data)
+{
+	int rc = 0;
+
+	switch (link_evt_data->evt_type) {
+	case CAM_REQ_MGR_LINK_EVT_ERR:
+		/* No need to handle this message now */
+		break;
+	case CAM_REQ_MGR_LINK_EVT_PAUSE:
+		__cam_isp_ctx_link_pause(ctx);
+		break;
+	case CAM_REQ_MGR_LINK_EVT_RESUME:
+		__cam_isp_ctx_link_resume(ctx);
+		break;
+	case CAM_REQ_MGR_LINK_EVT_SOF_FREEZE:
+		__cam_isp_ctx_handle_sof_freeze_evt(ctx);
+		break;
+	default:
+		CAM_WARN(CAM_ISP, "Unknown event from CRM");
+		break;
+	}
+	return rc;
+}
+
+/*
+ * Unexpected unlink while streaming: stop the HW immediately, then
+ * perform the normal Ready-state unlink (back to CAM_CTX_ACQUIRED).
+ */
+static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	CAM_WARN(CAM_ISP,
+		"Received unlink in activated state. It's unexpected");
+
+	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
+	if (rc)
+		CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);
+
+	rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
+
+	return rc;
+}
+
+/*
+ * CRM apply_req entry for the Activated state: dispatches through the
+ * context's substate machine; substates without an apply handler
+ * (e.g. bubble/halt) reject the apply with -EFAULT.
+ */
+static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_ctx_ops *ctx_ops = NULL;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	trace_cam_apply_req("ISP", apply->request_id);
+	CAM_DBG(CAM_ISP, "Enter: apply req in Substate %d request _id:%lld",
+		 ctx_isp->substate_activated, apply->request_id);
+	ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated];
+	if (ctx_ops->crm_ops.apply_req) {
+		rc = ctx_ops->crm_ops.apply_req(ctx, apply);
+	} else {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No handle function in activated substate %d",
+			ctx_isp->substate_activated);
+		rc = -EFAULT;
+	}
+
+	if (rc)
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Apply failed in active substate %d",
+			ctx_isp->substate_activated);
+	return rc;
+}
+
+
+
+/*
+ * IRQ entry point for the Activated state: dispatches the event id
+ * through the per-substate irq_ops table under ctx->lock.
+ * NOTE(review): plain spin_lock here vs spin_lock_bh elsewhere —
+ * presumably this runs in tasklet/irq context; confirm with callers.
+ */
+static int __cam_isp_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_ctx_irq_ops *irq_ops = NULL;
+	struct cam_context *ctx = (struct cam_context *)context;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *)ctx->ctx_priv;
+
+	spin_lock(&ctx->lock);
+
+	trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
+		__cam_isp_ctx_get_event_ts(evt_id, evt_data));
+
+	CAM_DBG(CAM_ISP, "Enter: State %d, Substate %d, evt id %d",
+		 ctx->state, ctx_isp->substate_activated, evt_id);
+	irq_ops = &ctx_isp->substate_machine_irq[ctx_isp->substate_activated];
+	if (irq_ops->irq_ops[evt_id]) {
+		rc = irq_ops->irq_ops[evt_id](ctx_isp, evt_data);
+	} else {
+		/* unhandled event in this substate: dump history for debug */
+		CAM_DBG(CAM_ISP, "No handle function for substate %d",
+			ctx_isp->substate_activated);
+		__cam_isp_ctx_dump_state_monitor_array(ctx_isp);
+	}
+
+	CAM_DBG(CAM_ISP, "Exit: State %d Substate %d",
+		 ctx->state, ctx_isp->substate_activated);
+	spin_unlock(&ctx->lock);
+	return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+ cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
+ /* Uninit */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Available */
+ {
+ .ioctl_ops = {
+ .acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
+ },
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Acquired */
+ {
+ .ioctl_ops = {
+ .acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired,
+ .release_dev = __cam_isp_ctx_release_dev_in_top_state,
+ .config_dev = __cam_isp_ctx_config_dev_in_acquired,
+ .release_hw = __cam_isp_ctx_release_hw_in_top_state,
+ },
+ .crm_ops = {
+ .link = __cam_isp_ctx_link_in_acquired,
+ .unlink = __cam_isp_ctx_unlink_in_acquired,
+ .get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
+ .flush_req = __cam_isp_ctx_flush_req_in_top_state,
+ },
+ .irq_ops = NULL,
+ .pagefault_ops = cam_isp_context_dump_active_request,
+ },
+ /* Ready */
+ {
+ .ioctl_ops = {
+ .start_dev = __cam_isp_ctx_start_dev_in_ready,
+ .release_dev = __cam_isp_ctx_release_dev_in_top_state,
+ .config_dev = __cam_isp_ctx_config_dev_in_top_state,
+ .release_hw = __cam_isp_ctx_release_hw_in_top_state,
+ },
+ .crm_ops = {
+ .unlink = __cam_isp_ctx_unlink_in_ready,
+ .flush_req = __cam_isp_ctx_flush_req_in_ready,
+ },
+ .irq_ops = NULL,
+ .pagefault_ops = cam_isp_context_dump_active_request,
+ },
+ /* Activated */
+ {
+ .ioctl_ops = {
+ .stop_dev = __cam_isp_ctx_stop_dev_in_activated,
+ .release_dev = __cam_isp_ctx_release_dev_in_activated,
+ .config_dev = __cam_isp_ctx_config_dev_in_top_state,
+ .release_hw = __cam_isp_ctx_release_hw_in_activated,
+ },
+ .crm_ops = {
+ .unlink = __cam_isp_ctx_unlink_in_activated,
+ .apply_req = __cam_isp_ctx_apply_req,
+ .flush_req = __cam_isp_ctx_flush_req_in_top_state,
+ .process_evt = __cam_isp_ctx_process_evt,
+ },
+ .irq_ops = __cam_isp_ctx_handle_irq_in_activated,
+ .pagefault_ops = cam_isp_context_dump_active_request,
+ },
+};
+
+
+/*
+ * IOMMU page-fault handler: walks the active and wait request lists
+ * and asks the generic context layer to check each request's packet
+ * for a buffer covering the faulting iova. Diagnostic only; the last
+ * dump rc is returned.
+ */
+static int cam_isp_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info)
+{
+
+	struct cam_context          *ctx = (struct cam_context *)data;
+	struct cam_ctx_request      *req = NULL;
+	struct cam_ctx_request      *req_temp = NULL;
+	struct cam_isp_ctx_req      *req_isp  = NULL;
+	struct cam_isp_prepare_hw_update_data *hw_update_data = NULL;
+	struct cam_hw_mgr_dump_pf_data  *pf_dbg_entry = NULL;
+	bool mem_found = false;
+	int rc = 0;
+
+	struct cam_isp_context *isp_ctx =
+		(struct cam_isp_context *)ctx->ctx_priv;
+
+	if (!isp_ctx) {
+		CAM_ERR(CAM_ISP, "Invalid isp ctx");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_ISP, "iommu fault handler for isp ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->active_req_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		hw_update_data = &req_isp->hw_update_data;
+		pf_dbg_entry = &(req->pf_data);
+		CAM_INFO(CAM_ISP, "req_id : %lld ", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &mem_found);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Failed to dump pf info");
+
+		if (mem_found)
+			CAM_ERR(CAM_ISP, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+
+	CAM_INFO(CAM_ISP, "Iterating over wait_list of isp ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->wait_req_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		hw_update_data = &req_isp->hw_update_data;
+		pf_dbg_entry = &(req->pf_data);
+		CAM_INFO(CAM_ISP, "req_id : %lld ", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &mem_found);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Failed to dump pf info");
+
+		if (mem_found)
+			CAM_ERR(CAM_ISP, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+
+	return rc;
+}
+
+/*
+ * Public init: zeroes the ISP context, wires each request slot to its
+ * ISP private data, initializes the generic cam_context base and links
+ * it back to this ISP context with the top state machine installed.
+ * Returns 0 on success or a negative value on bad arguments / base
+ * init failure.
+ */
+int cam_isp_context_init(struct cam_isp_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_req_mgr_kmd_ops *crm_node_intf,
+	struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id)
+
+{
+	int rc = -1;
+	int i;
+
+	if (!ctx || !ctx_base) {
+		CAM_ERR(CAM_ISP, "Invalid Context");
+		goto err;
+	}
+
+	/* ISP context setup */
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->base = ctx_base;
+	ctx->frame_id = 0;
+	ctx->active_req_cnt = 0;
+	ctx->reported_req_id = 0;
+	ctx->hw_ctx = NULL;
+	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	/* default (PIX) substate machines; may be swapped at acquire time */
+	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
+	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
+
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		ctx->req_base[i].req_priv = &ctx->req_isp[i];
+		ctx->req_isp[i].base = &ctx->req_base[i];
+	}
+
+	/* camera context setup */
+	rc = cam_context_init(ctx_base, isp_dev_name, CAM_ISP, ctx_id,
+		crm_node_intf, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Camera Context Base init failed");
+		goto err;
+	}
+
+	/* link camera context with isp context */
+	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
+	ctx_base->ctx_priv = ctx;
+
+	/* initializing current state for error logging */
+	for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
+		ctx->cam_isp_ctx_state_monitor[i].curr_state =
+		CAM_ISP_CTX_ACTIVATED_MAX;
+	}
+	atomic64_set(&ctx->state_monitor_head, -1);
+err:
+	return rc;
+}
+
+/*
+ * Public deinit: tears down the base context, warns if the substate
+ * never returned to SOF (indicates an unclean stop), and wipes the
+ * ISP context. Always returns 0.
+ */
+int cam_isp_context_deinit(struct cam_isp_context *ctx)
+{
+	int rc = 0;
+
+	if (ctx->base)
+		cam_context_deinit(ctx->base);
+
+	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
+		CAM_ERR(CAM_ISP, "ISP context substate is invalid");
+
+	memset(ctx, 0, sizeof(*ctx));
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_context.h
new file mode 100644
index 000000000000..5ebd82ead470
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_context.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_CONTEXT_H_
+#define _CAM_ISP_CONTEXT_H_
+
+
+#include <linux/spinlock.h>
+#include <uapi/media/cam_isp.h>
+#include <uapi/media/cam_defs.h>
+
+#include "cam_context.h"
+#include "cam_isp_hw_mgr_intf.h"
+
+/*
+ * Maximum hw resource - This number is based on the maximum
+ * output port resource. The current maximum resource number
+ * is 24.
+ */
+#define CAM_ISP_CTX_RES_MAX 24
+
+/*
+ * Maximum configuration entry size - This is based on the
+ * worst case DUAL IFE use case plus some margin.
+ */
+#define CAM_ISP_CTX_CFG_MAX 22
+
+/*
+ * Maximum entries in state monitoring array for error logging
+ */
+#define CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES 20
+
+/* forward declaration */
+struct cam_isp_context;
+
+/* cam isp context irq handling function type */
+typedef int (*cam_isp_hw_event_cb_func)(struct cam_isp_context *ctx_isp,
+ void *evt_data);
+
+/**
+ * enum cam_isp_ctx_activated_substate - sub states for activated
+ *
+ */
+enum cam_isp_ctx_activated_substate {
+ CAM_ISP_CTX_ACTIVATED_SOF,
+ CAM_ISP_CTX_ACTIVATED_APPLIED,
+ CAM_ISP_CTX_ACTIVATED_EPOCH,
+ CAM_ISP_CTX_ACTIVATED_BUBBLE,
+ CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
+ CAM_ISP_CTX_ACTIVATED_HW_ERROR,
+ CAM_ISP_CTX_ACTIVATED_HALT,
+ CAM_ISP_CTX_ACTIVATED_MAX,
+};
+
+/**
+ * enum cam_isp_state_change_trigger - Different types of ISP events
+ *
+ */
+enum cam_isp_state_change_trigger {
+ CAM_ISP_STATE_CHANGE_TRIGGER_ERROR,
+ CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
+ CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
+ CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
+ CAM_ISP_STATE_CHANGE_TRIGGER_EOF,
+ CAM_ISP_STATE_CHANGE_TRIGGER_DONE,
+ CAM_ISP_STATE_CHANGE_TRIGGER_MAX
+};
+
+/**
+ * struct cam_isp_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops: Array of handle function pointers.
+ *
+ */
+struct cam_isp_ctx_irq_ops {
+ cam_isp_hw_event_cb_func irq_ops[CAM_ISP_HW_EVENT_MAX];
+};
+
+/**
+ * struct cam_isp_ctx_req - ISP context request object
+ *
+ * @base: Common request object pointer
+ * @cfg: ISP hardware configuration array
+ * @num_cfg: Number of ISP hardware configuration entries
+ * @fence_map_out: Output fence mapping array
+ * @num_fence_map_out: Number of the output fence map
+ * @fence_map_in: Input fence mapping array
+ * @num_fence_map_in: Number of input fence map
+ * @num_acked: Count to track acked entries for output.
+ * If count equals the number of fence out, it means
+ * the request has been completed.
+ * @bubble_report: Flag to track if bubble report is active on
+ * current request
+ * @hw_update_data: HW update data for this request
+ *
+ */
+struct cam_isp_ctx_req {
+ struct cam_ctx_request *base;
+
+ struct cam_hw_update_entry cfg[CAM_ISP_CTX_CFG_MAX];
+ uint32_t num_cfg;
+ struct cam_hw_fence_map_entry fence_map_out
+ [CAM_ISP_CTX_RES_MAX];
+ uint32_t num_fence_map_out;
+ struct cam_hw_fence_map_entry fence_map_in[CAM_ISP_CTX_RES_MAX];
+ uint32_t num_fence_map_in;
+ uint32_t num_acked;
+ int32_t bubble_report;
+ struct cam_isp_prepare_hw_update_data hw_update_data;
+ bool bubble_detected;
+};
+
+/**
+ * struct cam_isp_context_state_monitor - ISP context state
+ * monitoring for
+ * debug purposes
+ *
+ *@curr_state: Current sub state that received req
+ *@trigger: Event type of incoming req
+ *@req_id: Request id
+ *@evt_time_stamp: Current time stamp
+ *
+ */
+struct cam_isp_context_state_monitor {
+ enum cam_isp_ctx_activated_substate curr_state;
+ enum cam_isp_state_change_trigger trigger;
+ uint32_t req_id;
+ int64_t frame_id;
+ uint64_t evt_time_stamp;
+};
+
+/**
+ * struct cam_isp_context - ISP context object
+ *
+ * @base: Common context object pointer
+ * @frame_id: Frame id tracking for the isp context
+ * @substate_activated: Current substate for the activated state.
+ * @substate_machine: ISP substate machine for external interface
+ * @substate_machine_irq: ISP substate machine for irq handling
+ * @req_base: Common request object storage
+ * @req_isp: ISP private request object storage
+ * @hw_ctx: HW object returned by the acquire device command
+ * @sof_timestamp_val: Captured time stamp value at sof hw event
+ * @boot_timestamp: Boot time stamp for a given req_id
+ * @active_req_cnt: Counter for the active request
+ * @reported_req_id: Last reported request id
+ * @subscribe_event: The irq event mask that CRM subscribes to, IFE
+ * will invoke CRM cb at those event.
+ * @last_applied_req_id: Last applied request id
+ * @state_monitor_head: Write index to the state monitoring array
+ * @cam_isp_ctx_state_monitor: State monitoring array
+ * @rdi_only_context: Get context type information.
+ * true, if context is rdi only context
+ * @hw_acquired: Indicate whether HW resources are acquired
+ * @init_received: Indicate whether init config packet is received
+ * @split_acquire: Indicate whether a separate acquire is expected
+ *
+ */
+struct cam_isp_context {
+ struct cam_context *base;
+
+ int64_t frame_id;
+ uint32_t substate_activated;
+ struct cam_ctx_ops *substate_machine;
+ struct cam_isp_ctx_irq_ops *substate_machine_irq;
+
+ struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+ struct cam_isp_ctx_req req_isp[CAM_CTX_REQ_MAX];
+
+ void *hw_ctx;
+ uint64_t sof_timestamp_val;
+ uint64_t boot_timestamp;
+ int32_t active_req_cnt;
+ int64_t reported_req_id;
+ uint32_t subscribe_event;
+ int64_t last_applied_req_id;
+ atomic64_t state_monitor_head;
+ struct cam_isp_context_state_monitor cam_isp_ctx_state_monitor[
+ CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES];
+ bool rdi_only_context;
+ bool hw_acquired;
+ bool init_received;
+ bool split_acquire;
+};
+
+/**
+ * cam_isp_context_init()
+ *
+ * @brief: Initialization function for the ISP context
+ *
+ * @ctx: ISP context obj to be initialized
+ * @bridge_ops: Bridge call back function
+ * @hw_intf: ISP hw manager interface
+ * @ctx_id: ID for this context
+ *
+ */
+int cam_isp_context_init(struct cam_isp_context *ctx,
+ struct cam_context *ctx_base,
+ struct cam_req_mgr_kmd_ops *bridge_ops,
+ struct cam_hw_mgr_intf *hw_intf,
+ uint32_t ctx_id);
+
+/**
+ * cam_isp_context_deinit()
+ *
+ * @brief: Deinitialize function for the ISP context
+ *
+ * @ctx: ISP context obj to be deinitialized
+ *
+ */
+int cam_isp_context_deinit(struct cam_isp_context *ctx);
+
+
+#endif /* _CAM_ISP_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_dev.c
new file mode 100644
index 000000000000..d8b7a7b5e73e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_dev.c
@@ -0,0 +1,205 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <uapi/media/cam_req_mgr.h>
+#include "cam_isp_dev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_node.h"
+#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
+
+static struct cam_isp_dev g_isp_dev;
+
+static void cam_isp_dev_iommu_fault_handler(
+ struct iommu_domain *domain, struct device *dev, unsigned long iova,
+ int flags, void *token, uint32_t buf_info)
+{
+ int i = 0;
+ struct cam_node *node = NULL;
+
+ if (!token) {
+ CAM_ERR(CAM_ISP, "invalid token in page handler cb");
+ return;
+ }
+
+ node = (struct cam_node *)token;
+
+ for (i = 0; i < node->ctx_size; i++)
+ cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+ buf_info);
+}
+
+static const struct of_device_id cam_isp_dt_match[] = {
+ {
+ .compatible = "qcom,cam-isp"
+ },
+ {}
+};
+
+static int cam_isp_subdev_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ mutex_lock(&g_isp_dev.isp_mutex);
+ g_isp_dev.open_cnt++;
+ mutex_unlock(&g_isp_dev.isp_mutex);
+
+ return 0;
+}
+
+static int cam_isp_subdev_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct cam_node *node = v4l2_get_subdevdata(sd);
+
+ mutex_lock(&g_isp_dev.isp_mutex);
+ if (g_isp_dev.open_cnt <= 0) {
+ CAM_DBG(CAM_ISP, "ISP subdev is already closed");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ g_isp_dev.open_cnt--;
+ if (!node) {
+ CAM_ERR(CAM_ISP, "Node ptr is NULL");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (g_isp_dev.open_cnt == 0)
+ cam_node_shutdown(node);
+
+end:
+ mutex_unlock(&g_isp_dev.isp_mutex);
+ return rc;
+}
+
+static const struct v4l2_subdev_internal_ops cam_isp_subdev_internal_ops = {
+ .close = cam_isp_subdev_close,
+ .open = cam_isp_subdev_open,
+};
+
+static int cam_isp_dev_remove(struct platform_device *pdev)
+{
+ int rc = 0;
+ int i;
+
+ /* clean up resources */
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ rc = cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]);
+ if (rc)
+ CAM_ERR(CAM_ISP, "ISP context %d deinit failed",
+ i);
+ }
+
+ rc = cam_subdev_remove(&g_isp_dev.sd);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Unregister failed");
+
+ memset(&g_isp_dev, 0, sizeof(g_isp_dev));
+ return 0;
+}
+
+static int cam_isp_dev_probe(struct platform_device *pdev)
+{
+ int rc = -1;
+ int i;
+ struct cam_hw_mgr_intf hw_mgr_intf;
+ struct cam_node *node;
+ int iommu_hdl = -1;
+
+ g_isp_dev.sd.internal_ops = &cam_isp_subdev_internal_ops;
+ /* Initialize the v4l2 subdevice first. (create cam_node) */
+ rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
+ CAM_IFE_DEVICE_TYPE);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "ISP cam_subdev_probe failed!");
+ goto err;
+ }
+ node = (struct cam_node *) g_isp_dev.sd.token;
+
+ memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
+ rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf, &iommu_hdl);
+ if (rc != 0) {
+ CAM_ERR(CAM_ISP, "Can not initialized ISP HW manager!");
+ goto unregister;
+ }
+
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ rc = cam_isp_context_init(&g_isp_dev.ctx_isp[i],
+ &g_isp_dev.ctx[i],
+ &node->crm_node_intf,
+ &node->hw_mgr_intf,
+ i);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "ISP context init failed!");
+ goto unregister;
+ }
+ }
+
+ rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, CAM_CTX_MAX,
+ CAM_ISP_DEV_NAME);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "ISP node init failed!");
+ goto unregister;
+ }
+
+ cam_smmu_set_client_page_fault_handler(iommu_hdl,
+ cam_isp_dev_iommu_fault_handler, node);
+
+ mutex_init(&g_isp_dev.isp_mutex);
+
+ CAM_INFO(CAM_ISP, "Camera ISP probe complete");
+
+ return 0;
+unregister:
+ rc = cam_subdev_remove(&g_isp_dev.sd);
+err:
+ return rc;
+}
+
+
+static struct platform_driver isp_driver = {
+ .probe = cam_isp_dev_probe,
+ .remove = cam_isp_dev_remove,
+ .driver = {
+ .name = "cam_isp",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_isp_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_isp_dev_init_module(void)
+{
+ return platform_driver_register(&isp_driver);
+}
+
+static void __exit cam_isp_dev_exit_module(void)
+{
+ platform_driver_unregister(&isp_driver);
+}
+
+module_init(cam_isp_dev_init_module);
+module_exit(cam_isp_dev_exit_module);
+MODULE_DESCRIPTION("MSM ISP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_dev.h b/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_dev.h
new file mode 100644
index 000000000000..a88ed5533907
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_dev.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_DEV_H_
+#define _CAM_ISP_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_isp_context.h"
+
+/**
+ * struct cam_isp_dev - Camera ISP V4l2 device node
+ *
+ * @sd: Common camera subdevice node
+ * @ctx: Isp base context storage
+ * @ctx_isp: Isp private context storage
+ * @isp_mutex: ISP dev mutex
+ * @open_cnt: Open device count
+ */
+struct cam_isp_dev {
+ struct cam_subdev sd;
+ struct cam_context ctx[CAM_CTX_MAX];
+ struct cam_isp_context ctx_isp[CAM_CTX_MAX];
+ struct mutex isp_mutex;
+ int32_t open_cnt;
+};
+
+#endif /* _CAM_ISP_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_log.h b/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_log.h
new file mode 100644
index 000000000000..321248a3e0eb
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/cam_isp_log.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_LOG_H_
+#define _CAM_ISP_LOG_H_
+
+#include <linux/kernel.h>
+
+#define ISP_TRACE_ENABLE 1
+
+#if (ISP_TRACE_ENABLE == 1)
+ #define ISP_TRACE(args...) trace_printk(args)
+#else
+ #define ISP_TRACE(arg...)
+#endif
+
+#endif /* _CAM_ISP_LOG_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/Makefile
new file mode 100644
index 000000000000..ef4e7a6668f8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += hw_utils/ isp_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_hw_mgr.o cam_ife_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
new file mode 100644
index 000000000000..a6c96bd8929c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -0,0 +1,5066 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <soc/qcom/scm.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_smmu_api.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_isp_packet_parser.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_packet_util.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_common_util.h"
+
+#define CAM_IFE_HW_ENTRIES_MAX 20
+
+#define TZ_SVC_SMMU_PROGRAM 0x15
+#define TZ_SAFE_SYSCALL_ID 0x3
+#define CAM_IFE_SAFE_DISABLE 0
+#define CAM_IFE_SAFE_ENABLE 1
+#define SMMU_SE_IFE 0
+
+#define CAM_ISP_PACKET_META_MAX \
+ (CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON + 1)
+
+#define CAM_ISP_GENERIC_BLOB_TYPE_MAX \
+ (CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG + 1)
+
+static uint32_t blob_type_hw_cmd_map[CAM_ISP_GENERIC_BLOB_TYPE_MAX] = {
+ CAM_ISP_HW_CMD_GET_HFR_UPDATE,
+ CAM_ISP_HW_CMD_CLOCK_UPDATE,
+ CAM_ISP_HW_CMD_BW_UPDATE,
+ CAM_ISP_HW_CMD_UBWC_UPDATE,
+ CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE,
+};
+
+static struct cam_ife_hw_mgr g_ife_hw_mgr;
+
+static int cam_ife_notify_safe_lut_scm(bool safe_trigger)
+{
+ uint32_t camera_hw_version, rc = 0;
+ struct scm_desc desc = {0};
+
+ rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+ if (!rc) {
+ switch (camera_hw_version) {
+ case CAM_CPAS_TITAN_170_V100:
+ case CAM_CPAS_TITAN_170_V110:
+ case CAM_CPAS_TITAN_175_V100:
+
+ desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+ desc.args[0] = SMMU_SE_IFE;
+ desc.args[1] = safe_trigger;
+
+ CAM_DBG(CAM_ISP, "Safe scm call %d", safe_trigger);
+ if (scm_call2(SCM_SIP_FNID(TZ_SVC_SMMU_PROGRAM,
+ TZ_SAFE_SYSCALL_ID), &desc)) {
+ CAM_ERR(CAM_ISP,
+ "scm call to Enable Safe failed");
+ rc = -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int cam_ife_mgr_get_hw_caps(void *hw_mgr_priv,
+ void *hw_caps_args)
+{
+ int rc = 0;
+ int i;
+ struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_query_cap_cmd *query = hw_caps_args;
+ struct cam_isp_query_cap_cmd query_isp;
+
+ CAM_DBG(CAM_ISP, "enter");
+
+ if (copy_from_user(&query_isp,
+ u64_to_user_ptr(query->caps_handle),
+ sizeof(struct cam_isp_query_cap_cmd))) {
+ rc = -EFAULT;
+ return rc;
+ }
+
+ query_isp.device_iommu.non_secure = hw_mgr->mgr_common.img_iommu_hdl;
+ query_isp.device_iommu.secure = hw_mgr->mgr_common.img_iommu_hdl_secure;
+ query_isp.cdm_iommu.non_secure = hw_mgr->mgr_common.cmd_iommu_hdl;
+ query_isp.cdm_iommu.secure = hw_mgr->mgr_common.cmd_iommu_hdl_secure;
+ query_isp.num_dev = 2;
+ for (i = 0; i < query_isp.num_dev; i++) {
+ query_isp.dev_caps[i].hw_type = CAM_ISP_HW_IFE;
+ query_isp.dev_caps[i].hw_version.major = 1;
+ query_isp.dev_caps[i].hw_version.minor = 7;
+ query_isp.dev_caps[i].hw_version.incr = 0;
+ query_isp.dev_caps[i].hw_version.reserved = 0;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(query->caps_handle),
+ &query_isp, sizeof(struct cam_isp_query_cap_cmd)))
+ rc = -EFAULT;
+
+ CAM_DBG(CAM_ISP, "exit rc :%d", rc);
+
+ return rc;
+}
+
+static int cam_ife_hw_mgr_is_rdi_res(uint32_t res_id)
+{
+ int rc = 0;
+
+ switch (res_id) {
+ case CAM_ISP_IFE_OUT_RES_RDI_0:
+ case CAM_ISP_IFE_OUT_RES_RDI_1:
+ case CAM_ISP_IFE_OUT_RES_RDI_2:
+ case CAM_ISP_IFE_OUT_RES_RDI_3:
+ rc = 1;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static int cam_ife_hw_mgr_reset_csid_res(
+ struct cam_ife_hw_mgr_res *isp_hw_res)
+{
+ int i;
+ int rc = 0;
+ struct cam_hw_intf *hw_intf;
+ struct cam_csid_reset_cfg_args csid_reset_args;
+
+ csid_reset_args.reset_type = CAM_IFE_CSID_RESET_PATH;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!isp_hw_res->hw_res[i])
+ continue;
+ csid_reset_args.node_res = isp_hw_res->hw_res[i];
+ hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+ CAM_DBG(CAM_ISP, "Resetting csid hardware %d",
+ hw_intf->hw_idx);
+ if (hw_intf->hw_ops.reset) {
+ rc = hw_intf->hw_ops.reset(hw_intf->hw_priv,
+ &csid_reset_args,
+ sizeof(struct cam_csid_reset_cfg_args));
+ if (rc <= 0)
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ CAM_ERR(CAM_ISP, "RESET HW res failed: (type:%d, id:%d)",
+ isp_hw_res->res_type, isp_hw_res->res_id);
+ return rc;
+}
+
+static int cam_ife_hw_mgr_init_hw_res(
+ struct cam_ife_hw_mgr_res *isp_hw_res)
+{
+ int i;
+ int rc = -1;
+ struct cam_hw_intf *hw_intf;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!isp_hw_res->hw_res[i])
+ continue;
+ hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+ CAM_DBG(CAM_ISP, "enabled vfe hardware %d",
+ hw_intf->hw_idx);
+ if (hw_intf->hw_ops.init) {
+ rc = hw_intf->hw_ops.init(hw_intf->hw_priv,
+ isp_hw_res->hw_res[i],
+ sizeof(struct cam_isp_resource_node));
+ if (rc)
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ CAM_ERR(CAM_ISP, "INIT HW res failed: (type:%d, id:%d)",
+ isp_hw_res->res_type, isp_hw_res->res_id);
+ return rc;
+}
+
+static int cam_ife_hw_mgr_start_hw_res(
+ struct cam_ife_hw_mgr_res *isp_hw_res,
+ struct cam_ife_hw_mgr_ctx *ctx)
+{
+ int i;
+ int rc = -1;
+ struct cam_hw_intf *hw_intf;
+
+ /* Start slave (which is right split) first */
+ for (i = CAM_ISP_HW_SPLIT_MAX - 1; i >= 0; i--) {
+ if (!isp_hw_res->hw_res[i])
+ continue;
+ hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+ if (hw_intf->hw_ops.start) {
+ isp_hw_res->hw_res[i]->rdi_only_ctx =
+ ctx->is_rdi_only_context;
+ rc = hw_intf->hw_ops.start(hw_intf->hw_priv,
+ isp_hw_res->hw_res[i],
+ sizeof(struct cam_isp_resource_node));
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not start HW resources");
+ goto err;
+ }
+ CAM_DBG(CAM_ISP, "Start HW %d Res %d", hw_intf->hw_idx,
+ isp_hw_res->hw_res[i]->res_id);
+ } else {
+ CAM_ERR(CAM_ISP, "function null");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ CAM_ERR(CAM_ISP, "Start hw res failed (type:%d, id:%d)",
+ isp_hw_res->res_type, isp_hw_res->res_id);
+ return rc;
+}
+
+static void cam_ife_hw_mgr_stop_hw_res(
+ struct cam_ife_hw_mgr_res *isp_hw_res)
+{
+ int i;
+ struct cam_hw_intf *hw_intf;
+ uint32_t dummy_args;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!isp_hw_res->hw_res[i])
+ continue;
+ hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+ if (hw_intf->hw_ops.stop)
+ hw_intf->hw_ops.stop(hw_intf->hw_priv,
+ isp_hw_res->hw_res[i],
+ sizeof(struct cam_isp_resource_node));
+ else
+ CAM_ERR(CAM_ISP, "stop null");
+ if (hw_intf->hw_ops.process_cmd &&
+ isp_hw_res->res_type == CAM_IFE_HW_MGR_RES_IFE_OUT) {
+ hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
+ &dummy_args, sizeof(dummy_args));
+ }
+ }
+}
+
+static void cam_ife_hw_mgr_deinit_hw_res(
+ struct cam_ife_hw_mgr_res *isp_hw_res)
+{
+ int i;
+ struct cam_hw_intf *hw_intf;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!isp_hw_res->hw_res[i])
+ continue;
+ hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+ if (hw_intf->hw_ops.deinit)
+ hw_intf->hw_ops.deinit(hw_intf->hw_priv,
+ isp_hw_res->hw_res[i],
+ sizeof(struct cam_isp_resource_node));
+ }
+}
+
+static void cam_ife_hw_mgr_deinit_hw(
+ struct cam_ife_hw_mgr_ctx *ctx)
+{
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ int i = 0;
+
+ if (!ctx->init_done) {
+ CAM_WARN(CAM_ISP, "ctx is not in init state");
+ return;
+ }
+
+ /* Deinit IFE CID */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+ CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CID\n", __func__);
+ cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+ }
+
+ /* Deinit IFE CSID */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+ CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CSID\n", __func__);
+ cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+ }
+
+ /* Deinit IFE MUX(SRC) */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+ }
+
+ /* Deinit IFE OUT */
+ for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+ cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_ife_out[i]);
+
+ ctx->init_done = false;
+}
+
+static int cam_ife_hw_mgr_init_hw(
+ struct cam_ife_hw_mgr_ctx *ctx)
+{
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ int rc = 0, i;
+
+ CAM_DBG(CAM_ISP, "INIT IFE CID ... in ctx id:%d",
+ ctx->ctx_index);
+ /* INIT IFE CID */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+ rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not INIT IFE CID(id :%d)",
+ hw_mgr_res->res_id);
+ goto deinit;
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "INIT IFE csid ... in ctx id:%d",
+ ctx->ctx_index);
+
+ /* INIT IFE csid */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+ rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not INIT IFE CSID(id :%d)",
+ hw_mgr_res->res_id);
+ goto deinit;
+ }
+ }
+
+ /* INIT IFE SRC */
+ CAM_DBG(CAM_ISP, "INIT IFE SRC in ctx id:%d",
+ ctx->ctx_index);
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)",
+ hw_mgr_res->res_id);
+ goto deinit;
+ }
+ }
+
+ /* INIT IFE OUT */
+ CAM_DBG(CAM_ISP, "INIT IFE OUT RESOURCES in ctx id:%d",
+ ctx->ctx_index);
+
+ for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+ rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)",
+ ctx->res_list_ife_out[i].res_id);
+ goto deinit;
+ }
+ }
+
+ return rc;
+deinit:
+ ctx->init_done = true;
+ cam_ife_hw_mgr_deinit_hw(ctx);
+ return rc;
+}
+
+static int cam_ife_hw_mgr_put_res(
+ struct list_head *src_list,
+ struct cam_ife_hw_mgr_res **res)
+{
+ int rc = 0;
+ struct cam_ife_hw_mgr_res *res_ptr = NULL;
+
+ res_ptr = *res;
+ if (res_ptr)
+ list_add_tail(&res_ptr->list, src_list);
+
+ return rc;
+}
+
+static int cam_ife_hw_mgr_get_res(
+ struct list_head *src_list,
+ struct cam_ife_hw_mgr_res **res)
+{
+ int rc = 0;
+ struct cam_ife_hw_mgr_res *res_ptr = NULL;
+
+ if (!list_empty(src_list)) {
+ res_ptr = list_first_entry(src_list,
+ struct cam_ife_hw_mgr_res, list);
+ list_del_init(&res_ptr->list);
+ } else {
+ CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx");
+ rc = -1;
+ }
+ *res = res_ptr;
+
+ return rc;
+}
+
+static int cam_ife_hw_mgr_free_hw_res(
+ struct cam_ife_hw_mgr_res *isp_hw_res)
+{
+ int rc = 0;
+ int i;
+ struct cam_hw_intf *hw_intf;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!isp_hw_res->hw_res[i])
+ continue;
+ hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+ if (hw_intf->hw_ops.release) {
+ rc = hw_intf->hw_ops.release(hw_intf->hw_priv,
+ isp_hw_res->hw_res[i],
+ sizeof(struct cam_isp_resource_node));
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Release hw resource id %d failed",
+ isp_hw_res->res_id);
+ isp_hw_res->hw_res[i] = NULL;
+ } else
+ CAM_ERR(CAM_ISP, "Release null");
+ }
+ /* caller should make sure the resource is in a list */
+ list_del_init(&isp_hw_res->list);
+ memset(isp_hw_res, 0, sizeof(*isp_hw_res));
+ INIT_LIST_HEAD(&isp_hw_res->list);
+
+ return 0;
+}
+
+static int cam_ife_mgr_csid_stop_hw(
+ struct cam_ife_hw_mgr_ctx *ctx, struct list_head *stop_list,
+ uint32_t base_idx, uint32_t stop_cmd)
+{
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_isp_resource_node *isp_res;
+ struct cam_isp_resource_node *stop_res[CAM_IFE_PIX_PATH_RES_MAX - 1];
+ struct cam_csid_hw_stop_args stop;
+ struct cam_hw_intf *hw_intf;
+ uint32_t i, cnt;
+
+ cnt = 0;
+ list_for_each_entry(hw_mgr_res, stop_list, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ isp_res = hw_mgr_res->hw_res[i];
+ if (isp_res->hw_intf->hw_idx != base_idx)
+ continue;
+ CAM_DBG(CAM_ISP, "base_idx %d res_id %d cnt %u",
+ base_idx, isp_res->res_id, cnt);
+ stop_res[cnt] = isp_res;
+ cnt++;
+ }
+ }
+
+ if (cnt) {
+ hw_intf = stop_res[0]->hw_intf;
+ stop.num_res = cnt;
+ stop.node_res = stop_res;
+ stop.stop_cmd = stop_cmd;
+ hw_intf->hw_ops.stop(hw_intf->hw_priv, &stop, sizeof(stop));
+ }
+
+ return 0;
+}
+
+static int cam_ife_hw_mgr_release_hw_for_ctx(
+ struct cam_ife_hw_mgr_ctx *ife_ctx)
+{
+ uint32_t i;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_ife_hw_mgr_res *hw_mgr_res_temp;
+
+ /* ife leaf resource */
+ for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+ cam_ife_hw_mgr_free_hw_res(&ife_ctx->res_list_ife_out[i]);
+
+ /* ife source resource */
+ list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+ &ife_ctx->res_list_ife_src, list) {
+ cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
+ cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
+ }
+
+ /* ife csid resource */
+ list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+ &ife_ctx->res_list_ife_csid, list) {
+ cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
+ cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
+ }
+
+ /* ife cid resource */
+ list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+ &ife_ctx->res_list_ife_cid, list) {
+ cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
+ cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
+ }
+
+ /* ife root node */
+ if (ife_ctx->res_list_ife_in.res_type != CAM_IFE_HW_MGR_RES_UNINIT)
+ cam_ife_hw_mgr_free_hw_res(&ife_ctx->res_list_ife_in);
+
+ /* clean up the callback function */
+ ife_ctx->common.cb_priv = NULL;
+ memset(ife_ctx->common.event_cb, 0, sizeof(ife_ctx->common.event_cb));
+
+ CAM_DBG(CAM_ISP, "release context completed ctx id:%d",
+ ife_ctx->ctx_index);
+
+ return 0;
+}
+
+
+static int cam_ife_hw_mgr_put_ctx(
+ struct list_head *src_list,
+ struct cam_ife_hw_mgr_ctx **ife_ctx)
+{
+ int rc = 0;
+ struct cam_ife_hw_mgr_ctx *ctx_ptr = NULL;
+
+ mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+ ctx_ptr = *ife_ctx;
+ if (ctx_ptr)
+ list_add_tail(&ctx_ptr->list, src_list);
+ *ife_ctx = NULL;
+ mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+ return rc;
+}
+
+static int cam_ife_hw_mgr_get_ctx(
+ struct list_head *src_list,
+ struct cam_ife_hw_mgr_ctx **ife_ctx)
+{
+ int rc = 0;
+ struct cam_ife_hw_mgr_ctx *ctx_ptr = NULL;
+
+ mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+ if (!list_empty(src_list)) {
+ ctx_ptr = list_first_entry(src_list,
+ struct cam_ife_hw_mgr_ctx, list);
+ list_del_init(&ctx_ptr->list);
+ } else {
+ CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx");
+ rc = -1;
+ }
+ *ife_ctx = ctx_ptr;
+ mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+
+ return rc;
+}
+
+static void cam_ife_mgr_add_base_info(
+ struct cam_ife_hw_mgr_ctx *ctx,
+ enum cam_isp_hw_split_id split_id,
+ uint32_t base_idx)
+{
+ uint32_t i;
+
+ if (!ctx->num_base) {
+ ctx->base[0].split_id = split_id;
+ ctx->base[0].idx = base_idx;
+ ctx->num_base++;
+ CAM_DBG(CAM_ISP,
+ "Add split id = %d for base idx = %d num_base=%d",
+ split_id, base_idx, ctx->num_base);
+ } else {
+ /*Check if base index already exists in the list */
+ for (i = 0; i < ctx->num_base; i++) {
+ if (ctx->base[i].idx == base_idx) {
+ if (split_id != CAM_ISP_HW_SPLIT_MAX &&
+ ctx->base[i].split_id ==
+ CAM_ISP_HW_SPLIT_MAX)
+ ctx->base[i].split_id = split_id;
+
+ break;
+ }
+ }
+
+ if (i == ctx->num_base) {
+ ctx->base[ctx->num_base].split_id = split_id;
+ ctx->base[ctx->num_base].idx = base_idx;
+ ctx->num_base++;
+ CAM_DBG(CAM_ISP,
+ "Add split_id=%d for base idx=%d num_base=%d",
+ split_id, base_idx, ctx->num_base);
+ }
+ }
+}
+
+static int cam_ife_mgr_process_base_info(
+ struct cam_ife_hw_mgr_ctx *ctx)
+{
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_isp_resource_node *res = NULL;
+ uint32_t i;
+
+ if (list_empty(&ctx->res_list_ife_src)) {
+ CAM_ERR(CAM_ISP, "Mux List empty");
+ return -ENODEV;
+ }
+
+ /* IFE mux in resources */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+ continue;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ res = hw_mgr_res->hw_res[i];
+ cam_ife_mgr_add_base_info(ctx, i,
+ res->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "add base info for hw %d",
+ res->hw_intf->hw_idx);
+ }
+ }
+ CAM_DBG(CAM_ISP, "ctx base num = %d", ctx->num_base);
+
+ return 0;
+}
+
+static int cam_ife_hw_mgr_acquire_res_ife_out_rdi(
+ struct cam_ife_hw_mgr_ctx *ife_ctx,
+ struct cam_ife_hw_mgr_res *ife_src_res,
+ struct cam_isp_in_port_info *in_port)
+{
+ int rc = -EINVAL;
+ struct cam_vfe_acquire_args vfe_acquire;
+ struct cam_isp_out_port_info *out_port = NULL;
+ struct cam_ife_hw_mgr_res *ife_out_res;
+ struct cam_hw_intf *hw_intf;
+ uint32_t i, vfe_out_res_id, vfe_in_res_id;
+
+ /* take left resource */
+ vfe_in_res_id = ife_src_res->hw_res[0]->res_id;
+
+ switch (vfe_in_res_id) {
+ case CAM_ISP_HW_VFE_IN_RDI0:
+ vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_0;
+ break;
+ case CAM_ISP_HW_VFE_IN_RDI1:
+ vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_1;
+ break;
+ case CAM_ISP_HW_VFE_IN_RDI2:
+ vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_2;
+ break;
+ case CAM_ISP_HW_VFE_IN_RDI3:
+ vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_3;
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "invalid resource type");
+ goto err;
+ }
+ CAM_DBG(CAM_ISP, "vfe_in_res_id = %d, vfe_out_red_id = %d",
+ vfe_in_res_id, vfe_out_res_id);
+
+ vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
+ vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
+
+ ife_out_res = &ife_ctx->res_list_ife_out[vfe_out_res_id & 0xFF];
+ for (i = 0; i < in_port->num_out_res; i++) {
+ out_port = &in_port->data[i];
+
+ CAM_DBG(CAM_ISP, "i = %d, vfe_out_res_id = %d, out_port: %d",
+ i, vfe_out_res_id, out_port->res_type);
+
+ if (vfe_out_res_id != out_port->res_type)
+ continue;
+
+ vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
+ vfe_acquire.vfe_out.ctx = ife_ctx;
+ vfe_acquire.vfe_out.out_port_info = out_port;
+ vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT;
+ vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
+ vfe_acquire.vfe_out.is_dual = 0;
+ hw_intf = ife_src_res->hw_res[0]->hw_intf;
+ rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+ &vfe_acquire,
+ sizeof(struct cam_vfe_acquire_args));
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not acquire out resource 0x%x",
+ out_port->res_type);
+ goto err;
+ }
+ break;
+ }
+
+ if (i == in_port->num_out_res) {
+ CAM_ERR(CAM_ISP,
+ "Cannot acquire out resource, i=%d, num_out_res=%d",
+ i, in_port->num_out_res);
+ goto err;
+ }
+
+ ife_out_res->hw_res[0] = vfe_acquire.vfe_out.rsrc_node;
+ ife_out_res->is_dual_vfe = 0;
+ ife_out_res->res_id = vfe_out_res_id;
+ ife_out_res->res_type = (enum cam_ife_hw_mgr_res_type)
+ CAM_ISP_RESOURCE_VFE_OUT;
+ ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
+
+ return 0;
+err:
+ return rc;
+}
+
/*
 * cam_ife_hw_mgr_acquire_res_ife_out_pixel() - acquire VFE OUT resources
 * for every non-RDI output port of @in_port and link them as children of
 * the pixel source resource @ife_src_res.
 *
 * For dual-VFE sources both splits are reserved: the left split acts as
 * master (its slave core derived from the hw index) and the right split
 * is always a slave.
 *
 * Return: 0 on success, negative error code otherwise; partially acquired
 * resources are released by the caller's cleanup path.
 */
static int cam_ife_hw_mgr_acquire_res_ife_out_pixel(
	struct cam_ife_hw_mgr_ctx       *ife_ctx,
	struct cam_ife_hw_mgr_res       *ife_src_res,
	struct cam_isp_in_port_info     *in_port)
{
	int rc = -1;
	uint32_t  i, j, k;
	struct cam_vfe_acquire_args               vfe_acquire;
	struct cam_isp_out_port_info             *out_port;
	struct cam_ife_hw_mgr_res                *ife_out_res;
	struct cam_hw_intf                       *hw_intf;

	for (i = 0; i < in_port->num_out_res; i++) {
		out_port = &in_port->data[i];
		/* Low byte of the out res type indexes res_list_ife_out */
		k = out_port->res_type & 0xFF;
		if (k >= CAM_IFE_HW_OUT_RES_MAX) {
			CAM_ERR(CAM_ISP, "invalid output resource type 0x%x",
				out_port->res_type);
			continue;
		}

		/* RDI outputs are handled by the _out_rdi variant */
		if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
			continue;

		CAM_DBG(CAM_ISP, "res_type 0x%x",
			out_port->res_type);

		ife_out_res = &ife_ctx->res_list_ife_out[k];
		ife_out_res->is_dual_vfe = in_port->usage_type;

		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
		vfe_acquire.vfe_out.ctx = ife_ctx;
		vfe_acquire.vfe_out.out_port_info =  out_port;
		vfe_acquire.vfe_out.is_dual       = ife_src_res->is_dual_vfe;
		vfe_acquire.vfe_out.unique_id     = ife_ctx->ctx_index;

		/* Reserve the out resource on each split of the source */
		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
			if (!ife_src_res->hw_res[j])
				continue;

			hw_intf = ife_src_res->hw_res[j]->hw_intf;

			if (j == CAM_ISP_HW_SPLIT_LEFT) {
				vfe_acquire.vfe_out.split_id  =
					CAM_ISP_HW_SPLIT_LEFT;
				if (ife_src_res->is_dual_vfe) {
					/*TBD */
					/* Left split is the master in dual
					 * mode; slave core is the other VFE.
					 */
					vfe_acquire.vfe_out.is_master     = 1;
					vfe_acquire.vfe_out.dual_slave_core =
						(hw_intf->hw_idx == 0) ? 1 : 0;
				} else {
					vfe_acquire.vfe_out.is_master   = 0;
					vfe_acquire.vfe_out.dual_slave_core =
						0;
				}
			} else {
				/* Right split is always a slave */
				vfe_acquire.vfe_out.split_id  =
					CAM_ISP_HW_SPLIT_RIGHT;
				vfe_acquire.vfe_out.is_master       = 0;
				vfe_acquire.vfe_out.dual_slave_core =
					(hw_intf->hw_idx == 0) ? 1 : 0;
			}

			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
				&vfe_acquire,
				sizeof(struct cam_vfe_acquire_args));
			if (rc) {
				CAM_ERR(CAM_ISP,
					"Can not acquire out resource 0x%x",
					out_port->res_type);
				goto err;
			}

			ife_out_res->hw_res[j] =
				vfe_acquire.vfe_out.rsrc_node;
			CAM_DBG(CAM_ISP, "resource type :0x%x res id:0x%x",
				ife_out_res->hw_res[j]->res_type,
				ife_out_res->hw_res[j]->res_id);

		}
		/* Record the acquired out resource and link it to its source */
		ife_out_res->res_type =
			(enum cam_ife_hw_mgr_res_type)CAM_ISP_RESOURCE_VFE_OUT;
		ife_out_res->res_id = out_port->res_type;
		ife_out_res->parent = ife_src_res;
		ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
	}

	return 0;
err:
	/* release resource at the entry function */
	return rc;
}
+
+static int cam_ife_hw_mgr_acquire_res_ife_out(
+ struct cam_ife_hw_mgr_ctx *ife_ctx,
+ struct cam_isp_in_port_info *in_port)
+{
+ int rc = -EINVAL;
+ struct cam_ife_hw_mgr_res *ife_src_res;
+
+ list_for_each_entry(ife_src_res, &ife_ctx->res_list_ife_src, list) {
+ if (ife_src_res->num_children)
+ continue;
+
+ switch (ife_src_res->res_id) {
+ case CAM_ISP_HW_VFE_IN_CAMIF:
+ case CAM_ISP_HW_VFE_IN_CAMIF_LITE:
+ rc = cam_ife_hw_mgr_acquire_res_ife_out_pixel(ife_ctx,
+ ife_src_res, in_port);
+ break;
+ case CAM_ISP_HW_VFE_IN_RDI0:
+ case CAM_ISP_HW_VFE_IN_RDI1:
+ case CAM_ISP_HW_VFE_IN_RDI2:
+ case CAM_ISP_HW_VFE_IN_RDI3:
+ rc = cam_ife_hw_mgr_acquire_res_ife_out_rdi(ife_ctx,
+ ife_src_res, in_port);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Unknown IFE SRC resource: %d",
+ ife_src_res->res_id);
+ break;
+ }
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+err:
+ /* release resource on entry function */
+ return rc;
+}
+
+static int cam_ife_hw_mgr_acquire_res_ife_src(
+ struct cam_ife_hw_mgr_ctx *ife_ctx,
+ struct cam_isp_in_port_info *in_port)
+{
+ int rc = -1;
+ int i;
+ struct cam_ife_hw_mgr_res *csid_res;
+ struct cam_ife_hw_mgr_res *ife_src_res;
+ struct cam_vfe_acquire_args vfe_acquire;
+ struct cam_hw_intf *hw_intf;
+ struct cam_ife_hw_mgr *ife_hw_mgr;
+
+ ife_hw_mgr = ife_ctx->hw_mgr;
+
+ list_for_each_entry(csid_res, &ife_ctx->res_list_ife_csid, list) {
+ if (csid_res->num_children)
+ continue;
+
+ rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
+ &ife_src_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+ goto err;
+ }
+ cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src,
+ &ife_src_res);
+
+ vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_IN;
+ vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
+ vfe_acquire.vfe_in.cdm_ops = ife_ctx->cdm_ops;
+ vfe_acquire.vfe_in.in_port = in_port;
+
+ switch (csid_res->res_id) {
+ case CAM_IFE_PIX_PATH_RES_IPP:
+ vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_CAMIF;
+ if (csid_res->is_dual_vfe)
+ vfe_acquire.vfe_in.sync_mode =
+ CAM_ISP_HW_SYNC_MASTER;
+ else
+ vfe_acquire.vfe_in.sync_mode =
+ CAM_ISP_HW_SYNC_NONE;
+
+ break;
+ case CAM_IFE_PIX_PATH_RES_PPP:
+ vfe_acquire.vfe_in.res_id =
+ CAM_ISP_HW_VFE_IN_CAMIF_LITE;
+ vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+
+ break;
+ case CAM_IFE_PIX_PATH_RES_RDI_0:
+ vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI0;
+ vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+ break;
+ case CAM_IFE_PIX_PATH_RES_RDI_1:
+ vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI1;
+ vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+ break;
+ case CAM_IFE_PIX_PATH_RES_RDI_2:
+ vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI2;
+ vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+ break;
+ case CAM_IFE_PIX_PATH_RES_RDI_3:
+ vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI3;
+ vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node");
+ goto err;
+ }
+ ife_src_res->res_type =
+ (enum cam_ife_hw_mgr_res_type)vfe_acquire.rsrc_type;
+ ife_src_res->res_id = vfe_acquire.vfe_in.res_id;
+ ife_src_res->is_dual_vfe = csid_res->is_dual_vfe;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!csid_res->hw_res[i])
+ continue;
+
+ hw_intf = ife_hw_mgr->ife_devices[
+ csid_res->hw_res[i]->hw_intf->hw_idx];
+
+ /* fill in more acquire information as needed */
+ /* slave Camif resource, */
+ if (i == CAM_ISP_HW_SPLIT_RIGHT &&
+ ife_src_res->is_dual_vfe)
+ vfe_acquire.vfe_in.sync_mode =
+ CAM_ISP_HW_SYNC_SLAVE;
+
+ rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+ &vfe_acquire,
+ sizeof(struct cam_vfe_acquire_args));
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Can not acquire IFE HW res %d",
+ csid_res->res_id);
+ goto err;
+ }
+ ife_src_res->hw_res[i] = vfe_acquire.vfe_in.rsrc_node;
+ CAM_DBG(CAM_ISP,
+ "acquire success IFE:%d res type :0x%x res id:0x%x",
+ hw_intf->hw_idx,
+ ife_src_res->hw_res[i]->res_type,
+ ife_src_res->hw_res[i]->res_id);
+
+ }
+
+ /* It should be one to one mapping between
+ * csid resource and ife source resource
+ */
+ csid_res->child[0] = ife_src_res;
+ ife_src_res->parent = csid_res;
+ csid_res->child[csid_res->num_children++] = ife_src_res;
+ CAM_DBG(CAM_ISP, "csid_res=%d num_children=%d ife_src_res=%d",
+ csid_res->res_id, csid_res->num_children,
+ ife_src_res->res_id);
+ }
+
+ return 0;
+err:
+ /* release resource at the entry function */
+ return rc;
+}
+
/*
 * cam_ife_mgr_acquire_cid_res() - acquire a CSID CID resource for
 * @csid_path on behalf of @ife_ctx and return it through @cid_res.
 *
 * Acquisition order:
 *   1. try to reuse a CSID HW already holding a CID for this context
 *      (secure/non-secure sessions are never mixed on one HW);
 *   2. otherwise scan the CSID devices from the highest index down for a
 *      free left CID;
 *   3. for dual-VFE IPP, additionally acquire a right CID on a different
 *      CSID HW.
 *
 * The acquired resource is moved onto res_list_ife_cid and linked as a
 * child of the root res_list_ife_in.
 *
 * NOTE(review): csid_acquire is not zero-initialized here (the RDI path
 * helper memsets its copy) — presumably the CID reserve path reads only
 * res_type/res_id/in_port; confirm against the CSID driver.
 */
static int cam_ife_mgr_acquire_cid_res(
	struct cam_ife_hw_mgr_ctx          *ife_ctx,
	struct cam_isp_in_port_info        *in_port,
	struct cam_ife_hw_mgr_res         **cid_res,
	enum cam_ife_pix_path_res_id        csid_path)
{
	int rc = -1;
	int i, j;
	struct cam_ife_hw_mgr               *ife_hw_mgr;
	struct cam_hw_intf                  *hw_intf;
	struct cam_ife_hw_mgr_res           *cid_res_temp, *cid_res_iterator;
	struct cam_csid_hw_reserve_resource_args  csid_acquire;
	uint32_t acquired_cnt = 0;
	struct cam_isp_out_port_info        *out_port = NULL;

	ife_hw_mgr = ife_ctx->hw_mgr;
	*cid_res = NULL;

	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, cid_res);
	if (rc) {
		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
		goto end;
	}

	cid_res_temp = *cid_res;

	csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
	csid_acquire.in_port = in_port;
	csid_acquire.res_id = csid_path;
	CAM_DBG(CAM_ISP, "path %d", csid_path);

	/* Secure mode is judged from the first out port, if any */
	if (in_port->num_out_res)
		out_port = &(in_port->data[0]);

	/* Try acquiring CID resource from previously acquired HW */
	list_for_each_entry(cid_res_iterator, &ife_ctx->res_list_ife_cid,
		list) {

		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!cid_res_iterator->hw_res[i])
				continue;

			/* Never mix secure and non-secure ports on one HW */
			if (cid_res_iterator->is_secure == 1 ||
				(cid_res_iterator->is_secure == 0 &&
				in_port->num_out_res &&
				out_port->secure_mode == 1))
				continue;

			hw_intf = cid_res_iterator->hw_res[i]->hw_intf;
			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
				&csid_acquire, sizeof(csid_acquire));
			if (rc) {
				CAM_DBG(CAM_ISP,
					"No ife cid resource from hw %d",
					hw_intf->hw_idx);
				continue;
			}

			cid_res_temp->hw_res[acquired_cnt++] =
				csid_acquire.node_res;

			CAM_DBG(CAM_ISP,
				"acquired csid(%s)=%d CID rsrc successfully",
				(i == 0) ? "left" : "right",
				hw_intf->hw_idx);

			if (in_port->usage_type && acquired_cnt == 1 &&
				csid_path == CAM_IFE_PIX_PATH_RES_IPP)
				/*
				 * Continue to acquire Right for IPP.
				 * Dual IFE for RDI and PPP is not currently
				 * supported.
				 */

				continue;

			if (acquired_cnt)
				/*
				 * If successfully acquired CID from
				 * previously acquired HW, skip the next
				 * part
				 */
				goto acquire_successful;
		}
	}

	/* Acquire Left if not already acquired */
	/* Scan from the highest CSID index downwards */
	for (i = CAM_IFE_CSID_HW_NUM_MAX - 1; i >= 0; i--) {
		if (!ife_hw_mgr->csid_devices[i])
			continue;

		hw_intf = ife_hw_mgr->csid_devices[i];
		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &csid_acquire,
			sizeof(csid_acquire));
		if (rc)
			continue;
		else {
			cid_res_temp->hw_res[acquired_cnt++] =
				csid_acquire.node_res;
			break;
		}
	}

	/* i == -1 means every CSID device was tried and refused */
	if (i == -1 || !csid_acquire.node_res) {
		CAM_ERR(CAM_ISP, "Can not acquire ife cid resource for path %d",
			csid_path);
		goto put_res;
	}

acquire_successful:
	CAM_DBG(CAM_ISP, "CID left acquired success is_dual %d",
		in_port->usage_type);

	cid_res_temp->res_type = CAM_IFE_HW_MGR_RES_CID;
	/* CID(DT_ID) value of acquire device, require for path */
	cid_res_temp->res_id = csid_acquire.node_res->res_id;
	cid_res_temp->is_dual_vfe = in_port->usage_type;

	if (in_port->num_out_res)
		cid_res_temp->is_secure = out_port->secure_mode;

	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, cid_res);

	/*
	 * Acquire Right if not already acquired.
	 * Dual IFE for RDI and PPP is not currently supported.
	 */
	if (cid_res_temp->is_dual_vfe && csid_path
		== CAM_IFE_PIX_PATH_RES_IPP && acquired_cnt == 1) {
		csid_acquire.node_res = NULL;
		csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
		csid_acquire.in_port = in_port;
		for (j = 0; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
			if (!ife_hw_mgr->csid_devices[j])
				continue;

			/* Right CID must live on a different CSID HW */
			if (j == cid_res_temp->hw_res[0]->hw_intf->hw_idx)
				continue;

			hw_intf = ife_hw_mgr->csid_devices[j];
			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
				&csid_acquire, sizeof(csid_acquire));
			if (rc)
				continue;
			else
				break;
		}

		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
			CAM_ERR(CAM_ISP,
				"Can not acquire ife csid rdi resource");
			goto end;
		}
		cid_res_temp->hw_res[1] = csid_acquire.node_res;
		CAM_DBG(CAM_ISP, "CID right acquired success is_dual %d",
			in_port->usage_type);
	}
	/* Link the CID as a child of the root input resource */
	cid_res_temp->parent = &ife_ctx->res_list_ife_in;
	ife_ctx->res_list_ife_in.child[
		ife_ctx->res_list_ife_in.num_children++] = cid_res_temp;

	return 0;
put_res:
	cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, cid_res);
end:
	return rc;

}
+
+static int cam_ife_hw_mgr_acquire_res_ife_csid_pxl(
+ struct cam_ife_hw_mgr_ctx *ife_ctx,
+ struct cam_isp_in_port_info *in_port,
+ bool is_ipp)
+{
+ int rc = -1;
+ int i;
+ int master_idx = -1;
+
+ struct cam_ife_hw_mgr *ife_hw_mgr;
+ struct cam_ife_hw_mgr_res *csid_res;
+ struct cam_ife_hw_mgr_res *cid_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_csid_hw_reserve_resource_args csid_acquire;
+ enum cam_ife_pix_path_res_id path_res_id;
+
+ ife_hw_mgr = ife_ctx->hw_mgr;
+ /* get cid resource */
+ if (is_ipp)
+ path_res_id = CAM_IFE_PIX_PATH_RES_IPP;
+ else
+ path_res_id = CAM_IFE_PIX_PATH_RES_PPP;
+
+ rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res,
+ path_res_id);
+
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
+ goto end;
+ }
+
+ rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+ goto end;
+ }
+
+ csid_res->res_type =
+ (enum cam_ife_hw_mgr_res_type)CAM_ISP_RESOURCE_PIX_PATH;
+
+ csid_res->res_id = path_res_id;
+
+ if (in_port->usage_type && is_ipp)
+ csid_res->is_dual_vfe = 1;
+ else {
+ csid_res->is_dual_vfe = 0;
+ csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
+ }
+
+ /* IPP resource needs to be from same HW as CID resource */
+ for (i = 0; i <= csid_res->is_dual_vfe; i++) {
+ CAM_DBG(CAM_ISP, "i %d is_dual %d", i, csid_res->is_dual_vfe);
+
+ csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+ csid_acquire.res_id = path_res_id;
+ csid_acquire.cid = cid_res->hw_res[i]->res_id;
+ csid_acquire.in_port = in_port;
+ csid_acquire.out_port = in_port->data;
+ csid_acquire.node_res = NULL;
+
+ hw_intf = cid_res->hw_res[i]->hw_intf;
+
+ if (csid_res->is_dual_vfe) {
+ if (i == CAM_ISP_HW_SPLIT_LEFT) {
+ master_idx = hw_intf->hw_idx;
+ csid_acquire.sync_mode =
+ CAM_ISP_HW_SYNC_MASTER;
+ } else {
+ if (master_idx == -1) {
+ CAM_ERR(CAM_ISP,
+ "No Master found");
+ goto put_res;
+ }
+ csid_acquire.sync_mode =
+ CAM_ISP_HW_SYNC_SLAVE;
+ csid_acquire.master_idx = master_idx;
+ }
+ }
+
+ rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+ &csid_acquire, sizeof(csid_acquire));
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Cannot acquire ife csid pxl path rsrc %s",
+ (is_ipp) ? "IPP" : "PPP");
+ goto put_res;
+ }
+
+ csid_res->hw_res[i] = csid_acquire.node_res;
+ CAM_DBG(CAM_ISP,
+ "acquired csid(%s)=%d pxl path rsrc %s successfully",
+ (i == 0) ? "left" : "right", hw_intf->hw_idx,
+ (is_ipp) ? "IPP" : "PPP");
+ }
+ cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
+
+ csid_res->parent = cid_res;
+ cid_res->child[cid_res->num_children++] = csid_res;
+
+ CAM_DBG(CAM_ISP, "acquire res %d", csid_acquire.res_id);
+
+ return 0;
+put_res:
+ cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &csid_res);
+end:
+ return rc;
+}
+
+static enum cam_ife_pix_path_res_id
+ cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
+ uint32_t out_port_type)
+{
+ enum cam_ife_pix_path_res_id path_id;
+
+ switch (out_port_type) {
+ case CAM_ISP_IFE_OUT_RES_RDI_0:
+ path_id = CAM_IFE_PIX_PATH_RES_RDI_0;
+ break;
+ case CAM_ISP_IFE_OUT_RES_RDI_1:
+ path_id = CAM_IFE_PIX_PATH_RES_RDI_1;
+ break;
+ case CAM_ISP_IFE_OUT_RES_RDI_2:
+ path_id = CAM_IFE_PIX_PATH_RES_RDI_2;
+ break;
+ case CAM_ISP_IFE_OUT_RES_RDI_3:
+ path_id = CAM_IFE_PIX_PATH_RES_RDI_3;
+ break;
+ default:
+ path_id = CAM_IFE_PIX_PATH_RES_MAX;
+ CAM_DBG(CAM_ISP, "maximum rdi output type exceeded");
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "out_port %d path_id %d", out_port_type, path_id);
+
+ return path_id;
+}
+
/*
 * cam_ife_hw_mgr_acquire_res_ife_csid_rdi() - acquire a CID + CSID RDI
 * path resource pair for every RDI out port in @in_port.
 *
 * RDI paths are always single-VFE (no sync, hw_res[1] == NULL).  Each
 * path resource is reserved on the same CSID HW as its CID, linked as a
 * child of the CID and moved onto res_list_ife_csid.
 *
 * Return: 0 on success, negative error code otherwise; resources
 * acquired so far are released by the caller's cleanup path.
 */
static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
	struct cam_ife_hw_mgr_ctx     *ife_ctx,
	struct cam_isp_in_port_info   *in_port)
{
	int rc = -EINVAL;
	int i;

	struct cam_ife_hw_mgr               *ife_hw_mgr;
	struct cam_ife_hw_mgr_res           *csid_res;
	struct cam_ife_hw_mgr_res           *cid_res;
	struct cam_hw_intf                  *hw_intf;
	struct cam_isp_out_port_info        *out_port;
	struct cam_csid_hw_reserve_resource_args  csid_acquire;
	enum cam_ife_pix_path_res_id         path_type;

	ife_hw_mgr = ife_ctx->hw_mgr;

	for (i = 0; i < in_port->num_out_res; i++) {
		out_port = &in_port->data[i];
		path_type = cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
			out_port->res_type);
		/* Non-RDI out ports are handled by the pixel path */
		if (path_type == CAM_IFE_PIX_PATH_RES_MAX)
			continue;

		/* get cid resource */
		rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res,
			path_type);
		if (rc) {
			CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
			goto end;
		}

		/* For each RDI we need CID + PATH resource */
		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
			&csid_res);
		if (rc) {
			CAM_ERR(CAM_ISP, "No more free hw mgr resource");
			goto end;
		}

		/* Zero-init so no stale/uninitialized field reaches the
		 * CSID driver.
		 */
		memset(&csid_acquire, 0, sizeof(csid_acquire));
		csid_acquire.res_id = path_type;

		csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
		csid_acquire.cid = cid_res->hw_res[0]->res_id;
		csid_acquire.in_port = in_port;
		csid_acquire.out_port = out_port;
		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
		csid_acquire.node_res = NULL;

		/* Path must come from the same CSID HW as the CID */
		hw_intf = cid_res->hw_res[0]->hw_intf;
		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
			&csid_acquire, sizeof(csid_acquire));
		if (rc) {
			CAM_ERR(CAM_ISP,
				"CSID Path reserve failed hw=%d rc=%d cid=%d",
				hw_intf->hw_idx, rc,
				cid_res->hw_res[0]->res_id);

			goto put_res;
		}

		if (csid_acquire.node_res == NULL) {
			CAM_ERR(CAM_ISP, "Acquire CSID RDI rsrc failed");

			goto put_res;
		}

		csid_res->res_type = (enum cam_ife_hw_mgr_res_type)
			CAM_ISP_RESOURCE_PIX_PATH;
		csid_res->res_id = csid_acquire.res_id;
		csid_res->is_dual_vfe = 0;
		csid_res->hw_res[0] = csid_acquire.node_res;
		csid_res->hw_res[1] = NULL;
		CAM_DBG(CAM_ISP, "acquire res %d",
			csid_acquire.res_id);
		csid_res->parent = cid_res;
		cid_res->child[cid_res->num_children++] =
			csid_res;
		cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);

	}

	return 0;
put_res:
	cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &csid_res);
end:
	return rc;
}
+
+static int cam_ife_hw_mgr_acquire_res_root(
+ struct cam_ife_hw_mgr_ctx *ife_ctx,
+ struct cam_isp_in_port_info *in_port)
+{
+ int rc = -1;
+
+ if (ife_ctx->res_list_ife_in.res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
+ /* first acquire */
+ ife_ctx->res_list_ife_in.res_type = CAM_IFE_HW_MGR_RES_ROOT;
+ ife_ctx->res_list_ife_in.res_id = in_port->res_type;
+ ife_ctx->res_list_ife_in.is_dual_vfe = in_port->usage_type;
+ } else if (ife_ctx->res_list_ife_in.res_id != in_port->res_type) {
+ CAM_ERR(CAM_ISP, "No Free resource for this context");
+ goto err;
+ } else {
+ /* else do nothing */
+ }
+ return 0;
+err:
+ /* release resource in entry function */
+ return rc;
+}
+
+static int cam_ife_hw_mgr_preprocess_out_port(
+ struct cam_ife_hw_mgr_ctx *ife_ctx,
+ struct cam_isp_in_port_info *in_port,
+ int *ipp_count,
+ int *rdi_count,
+ int *ppp_count)
+{
+ int ipp_num = 0;
+ int rdi_num = 0;
+ int ppp_num = 0;
+ uint32_t i;
+ struct cam_isp_out_port_info *out_port;
+ struct cam_ife_hw_mgr *ife_hw_mgr;
+
+ ife_hw_mgr = ife_ctx->hw_mgr;
+
+ for (i = 0; i < in_port->num_out_res; i++) {
+ out_port = &in_port->data[i];
+ if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
+ rdi_num++;
+ else if (out_port->res_type == CAM_ISP_IFE_OUT_RES_2PD)
+ ppp_num++;
+ else
+ ipp_num++;
+ }
+
+ *ipp_count = ipp_num;
+ *rdi_count = rdi_num;
+ *ppp_count = ppp_num;
+
+ return 0;
+}
+
+static int cam_ife_mgr_acquire_hw_for_ctx(
+ struct cam_ife_hw_mgr_ctx *ife_ctx,
+ struct cam_isp_in_port_info *in_port,
+ uint32_t *num_pix_port, uint32_t *num_rdi_port)
+{
+ int rc = -1;
+ int is_dual_vfe = 0;
+ int ipp_count = 0;
+ int rdi_count = 0;
+ int ppp_count = 0;
+
+ is_dual_vfe = in_port->usage_type;
+
+ /* get root node resource */
+ rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not acquire csid rx resource");
+ goto err;
+ }
+
+ cam_ife_hw_mgr_preprocess_out_port(ife_ctx, in_port,
+ &ipp_count, &rdi_count, &ppp_count);
+
+ if (!ipp_count && !rdi_count && !ppp_count) {
+ CAM_ERR(CAM_ISP, "No PIX or RDI or PPP resource");
+ return -EINVAL;
+ }
+
+ if (ipp_count) {
+ /* get ife csid IPP resource */
+ rc = cam_ife_hw_mgr_acquire_res_ife_csid_pxl(ife_ctx,
+ in_port, true);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Acquire IFE CSID IPP resource Failed");
+ goto err;
+ }
+ }
+
+ if (rdi_count) {
+ /* get ife csid rdi resource */
+ rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Acquire IFE CSID RDI resource Failed");
+ goto err;
+ }
+ }
+
+ if (ppp_count) {
+ /* get ife csid PPP resource */
+ rc = cam_ife_hw_mgr_acquire_res_ife_csid_pxl(ife_ctx,
+ in_port, false);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Acquire IFE CSID PPP resource Failed");
+ goto err;
+ }
+ }
+
+
+ /* get ife src resource */
+ rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed");
+ goto err;
+ }
+
+ rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed");
+ goto err;
+ }
+
+ *num_pix_port += ipp_count + ppp_count;
+ *num_rdi_port += rdi_count;
+
+ return 0;
+err:
+ /* release resource at the acquire entry funciton */
+ return rc;
+}
+
+void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata,
+ enum cam_cdm_cb_status status, uint64_t cookie)
+{
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+
+ if (!userdata) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ return;
+ }
+
+ ctx = userdata;
+
+ if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+ complete(&ctx->config_done_complete);
+ CAM_DBG(CAM_ISP,
+ "Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
+ handle, userdata, status, cookie);
+ } else {
+ CAM_WARN(CAM_ISP,
+ "Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
+ handle, userdata, status, cookie);
+ }
+}
+
+/* entry function: acquire_hw */
+static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+ struct cam_ife_hw_mgr *ife_hw_mgr = hw_mgr_priv;
+ struct cam_hw_acquire_args *acquire_args = acquire_hw_args;
+ int rc = -1;
+ int i, j;
+ struct cam_ife_hw_mgr_ctx *ife_ctx;
+ struct cam_isp_in_port_info *in_port = NULL;
+ struct cam_cdm_acquire_data cdm_acquire;
+ uint32_t num_pix_port_per_in = 0;
+ uint32_t num_rdi_port_per_in = 0;
+ uint32_t total_pix_port = 0;
+ uint32_t total_rdi_port = 0;
+ uint32_t in_port_length = 0;
+ uint32_t total_in_port_length = 0;
+ struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;
+
+ CAM_DBG(CAM_ISP, "Enter...");
+
+ if (!acquire_args || acquire_args->num_acq <= 0) {
+ CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
+ return -EINVAL;
+ }
+
+ /* get the ife ctx */
+ rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
+ if (rc || !ife_ctx) {
+ CAM_ERR(CAM_ISP, "Get ife hw context failed");
+ goto err;
+ }
+
+ ife_ctx->common.cb_priv = acquire_args->context_data;
+ for (i = 0; i < CAM_ISP_HW_EVENT_MAX; i++)
+ ife_ctx->common.event_cb[i] = acquire_args->event_cb;
+
+ ife_ctx->hw_mgr = ife_hw_mgr;
+
+
+ memcpy(cdm_acquire.identifier, "ife", sizeof("ife"));
+ cdm_acquire.cell_index = 0;
+ cdm_acquire.handle = 0;
+ cdm_acquire.userdata = ife_ctx;
+ cdm_acquire.base_array_cnt = CAM_IFE_HW_NUM_MAX;
+ for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+ if (ife_hw_mgr->cdm_reg_map[i])
+ cdm_acquire.base_array[j++] =
+ ife_hw_mgr->cdm_reg_map[i];
+ }
+ cdm_acquire.base_array_cnt = j;
+
+
+ cdm_acquire.id = CAM_CDM_VIRTUAL;
+ cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
+ rc = cam_cdm_acquire(&cdm_acquire);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
+ goto free_ctx;
+ }
+
+ CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
+ cdm_acquire.handle);
+ ife_ctx->cdm_handle = cdm_acquire.handle;
+ ife_ctx->cdm_ops = cdm_acquire.ops;
+
+ acquire_hw_info =
+ (struct cam_isp_acquire_hw_info *)acquire_args->acquire_info;
+ in_port = (struct cam_isp_in_port_info *)
+ ((uint8_t *)&acquire_hw_info->data +
+ acquire_hw_info->input_info_offset);
+
+ /* acquire HW resources */
+ for (i = 0; i < acquire_hw_info->num_inputs; i++) {
+ in_port_length = sizeof(struct cam_isp_in_port_info) +
+ (in_port->num_out_res - 1) *
+ sizeof(struct cam_isp_out_port_info);
+ total_in_port_length += in_port_length;
+
+ if (total_in_port_length > acquire_hw_info->input_info_size) {
+ CAM_ERR(CAM_ISP, "buffer size is not enough");
+ rc = -EINVAL;
+ goto free_res;
+ }
+ rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
+ &num_pix_port_per_in, &num_rdi_port_per_in);
+ total_pix_port += num_pix_port_per_in;
+ total_rdi_port += num_rdi_port_per_in;
+
+ if (rc) {
+ CAM_ERR(CAM_ISP, "can not acquire resource");
+ goto free_res;
+ }
+ in_port = (struct cam_isp_in_port_info *)((uint8_t *)in_port +
+ in_port_length);
+ }
+
+ /* Check whether context has only RDI resource */
+ if (!total_pix_port) {
+ ife_ctx->is_rdi_only_context = 1;
+ CAM_DBG(CAM_ISP, "RDI only context");
+ }
+
+ /* Process base info */
+ rc = cam_ife_mgr_process_base_info(ife_ctx);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Process base info failed");
+ goto free_res;
+ }
+
+ acquire_args->ctxt_to_hw_map = ife_ctx;
+ ife_ctx->ctx_in_use = 1;
+
+ cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
+
+ CAM_DBG(CAM_ISP, "Exit...(success)");
+
+ return 0;
+free_res:
+ cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
+ cam_cdm_release(ife_ctx->cdm_handle);
+free_ctx:
+ cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
+err:
+ CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+ return rc;
+}
+
+/* entry function: acquire_hw */
+static int cam_ife_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args)
+{
+ struct cam_ife_hw_mgr *ife_hw_mgr = hw_mgr_priv;
+ struct cam_hw_acquire_args *acquire_args = acquire_hw_args;
+ int rc = -1;
+ int i, j;
+ struct cam_ife_hw_mgr_ctx *ife_ctx;
+ struct cam_isp_in_port_info *in_port = NULL;
+ struct cam_isp_resource *isp_resource = NULL;
+ struct cam_cdm_acquire_data cdm_acquire;
+ uint32_t num_pix_port_per_in = 0;
+ uint32_t num_rdi_port_per_in = 0;
+ uint32_t total_pix_port = 0;
+ uint32_t total_rdi_port = 0;
+ uint32_t in_port_length = 0;
+
+ CAM_DBG(CAM_ISP, "Enter...");
+
+ if (!acquire_args || acquire_args->num_acq <= 0) {
+ CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
+ return -EINVAL;
+ }
+
+ /* get the ife ctx */
+ rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
+ if (rc || !ife_ctx) {
+ CAM_ERR(CAM_ISP, "Get ife hw context failed");
+ goto err;
+ }
+
+ ife_ctx->common.cb_priv = acquire_args->context_data;
+ for (i = 0; i < CAM_ISP_HW_EVENT_MAX; i++)
+ ife_ctx->common.event_cb[i] = acquire_args->event_cb;
+
+ ife_ctx->hw_mgr = ife_hw_mgr;
+
+
+ memcpy(cdm_acquire.identifier, "ife", sizeof("ife"));
+ cdm_acquire.cell_index = 0;
+ cdm_acquire.handle = 0;
+ cdm_acquire.userdata = ife_ctx;
+ cdm_acquire.base_array_cnt = CAM_IFE_HW_NUM_MAX;
+ for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+ if (ife_hw_mgr->cdm_reg_map[i])
+ cdm_acquire.base_array[j++] =
+ ife_hw_mgr->cdm_reg_map[i];
+ }
+ cdm_acquire.base_array_cnt = j;
+
+
+ cdm_acquire.id = CAM_CDM_VIRTUAL;
+ cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
+ rc = cam_cdm_acquire(&cdm_acquire);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
+ goto free_ctx;
+ }
+
+ CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
+ cdm_acquire.handle);
+ ife_ctx->cdm_handle = cdm_acquire.handle;
+ ife_ctx->cdm_ops = cdm_acquire.ops;
+
+ isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info;
+
+ /* acquire HW resources */
+ for (i = 0; i < acquire_args->num_acq; i++) {
+ if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT)
+ continue;
+
+ CAM_DBG(CAM_ISP,
+ "start copy from user handle %lld with len = %d",
+ isp_resource[i].res_hdl,
+ isp_resource[i].length);
+
+ in_port_length = sizeof(struct cam_isp_in_port_info);
+
+ if (in_port_length > isp_resource[i].length) {
+ CAM_ERR(CAM_ISP, "buffer size is not enough");
+ rc = -EINVAL;
+ goto free_res;
+ }
+
+ in_port = memdup_user(
+ u64_to_user_ptr(isp_resource[i].res_hdl),
+ isp_resource[i].length);
+ if (!IS_ERR(in_port)) {
+ if (in_port->num_out_res > CAM_IFE_HW_OUT_RES_MAX) {
+ CAM_ERR(CAM_ISP, "too many output res %d",
+ in_port->num_out_res);
+ rc = -EINVAL;
+ kfree(in_port);
+ goto free_res;
+ }
+
+ in_port_length = sizeof(struct cam_isp_in_port_info) +
+ (in_port->num_out_res - 1) *
+ sizeof(struct cam_isp_out_port_info);
+ if (in_port_length > isp_resource[i].length) {
+ CAM_ERR(CAM_ISP, "buffer size is not enough");
+ rc = -EINVAL;
+ kfree(in_port);
+ goto free_res;
+ }
+
+ rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
+ &num_pix_port_per_in, &num_rdi_port_per_in);
+ total_pix_port += num_pix_port_per_in;
+ total_rdi_port += num_rdi_port_per_in;
+
+ kfree(in_port);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "can not acquire resource");
+ goto free_res;
+ }
+ } else {
+ CAM_ERR(CAM_ISP,
+ "Copy from user failed with in_port = %pK",
+ in_port);
+ rc = -EFAULT;
+ goto free_res;
+ }
+ }
+
+ /* Check whether context has only RDI resource */
+ if (!total_pix_port) {
+ ife_ctx->is_rdi_only_context = 1;
+ CAM_DBG(CAM_ISP, "RDI only context");
+ }
+
+ /* Process base info */
+ rc = cam_ife_mgr_process_base_info(ife_ctx);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Process base info failed");
+ goto free_res;
+ }
+
+ acquire_args->ctxt_to_hw_map = ife_ctx;
+ ife_ctx->ctx_in_use = 1;
+
+ cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
+
+ CAM_DBG(CAM_ISP, "Exit...(success)");
+
+ return 0;
+free_res:
+ cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
+ cam_cdm_release(ife_ctx->cdm_handle);
+free_ctx:
+ cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
+err:
+ CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+ return rc;
+}
+
+/* entry function: acquire_hw */
+static int cam_ife_mgr_acquire(void *hw_mgr_priv,
+ void *acquire_hw_args)
+{
+ struct cam_hw_acquire_args *acquire_args = acquire_hw_args;
+ int rc = -1;
+
+ CAM_DBG(CAM_ISP, "Enter...");
+
+ if (!acquire_args || acquire_args->num_acq <= 0) {
+ CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
+ return -EINVAL;
+ }
+
+ if (acquire_args->num_acq == CAM_API_COMPAT_CONSTANT)
+ rc = cam_ife_mgr_acquire_hw(hw_mgr_priv, acquire_hw_args);
+ else
+ rc = cam_ife_mgr_acquire_dev(hw_mgr_priv, acquire_hw_args);
+
+ CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+ return rc;
+}
+
+
+static int cam_isp_blob_bw_update(
+ struct cam_isp_bw_config *bw_config,
+ struct cam_ife_hw_mgr_ctx *ctx)
+{
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_bw_update_args bw_upd_args;
+ uint64_t cam_bw_bps = 0;
+ uint64_t ext_bw_bps = 0;
+ int rc = -EINVAL;
+ uint32_t i;
+ bool camif_l_bw_updated = false;
+ bool camif_r_bw_updated = false;
+
+ CAM_DBG(CAM_PERF,
+ "usage=%u left cam_bw_bps=%llu ext_bw_bps=%llu\n"
+ "right cam_bw_bps=%llu ext_bw_bps=%llu",
+ bw_config->usage_type,
+ bw_config->left_pix_vote.cam_bw_bps,
+ bw_config->left_pix_vote.ext_bw_bps,
+ bw_config->right_pix_vote.cam_bw_bps,
+ bw_config->right_pix_vote.ext_bw_bps);
+
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+ if (i == CAM_ISP_HW_SPLIT_LEFT) {
+ if (camif_l_bw_updated)
+ continue;
+
+ cam_bw_bps =
+ bw_config->left_pix_vote.cam_bw_bps;
+ ext_bw_bps =
+ bw_config->left_pix_vote.ext_bw_bps;
+
+ camif_l_bw_updated = true;
+ } else {
+ if (camif_r_bw_updated)
+ continue;
+
+ cam_bw_bps =
+ bw_config->right_pix_vote.cam_bw_bps;
+ ext_bw_bps =
+ bw_config->right_pix_vote.ext_bw_bps;
+
+ camif_r_bw_updated = true;
+ }
+ else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0)
+ && (hw_mgr_res->res_id <=
+ CAM_ISP_HW_VFE_IN_RDI3)) {
+ uint32_t idx = hw_mgr_res->res_id -
+ CAM_ISP_HW_VFE_IN_RDI0;
+ if (idx >= bw_config->num_rdi)
+ continue;
+
+ cam_bw_bps =
+ bw_config->rdi_vote[idx].cam_bw_bps;
+ ext_bw_bps =
+ bw_config->rdi_vote[idx].ext_bw_bps;
+ } else if (hw_mgr_res->res_id ==
+ CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
+ if (i == CAM_ISP_HW_SPLIT_LEFT) {
+ if (camif_l_bw_updated)
+ continue;
+
+ cam_bw_bps =
+ bw_config->left_pix_vote.cam_bw_bps;
+ ext_bw_bps =
+ bw_config->left_pix_vote.ext_bw_bps;
+
+ camif_l_bw_updated = true;
+ } else {
+ if (camif_r_bw_updated)
+ continue;
+
+ cam_bw_bps =
+ bw_config->right_pix_vote.cam_bw_bps;
+ ext_bw_bps =
+ bw_config->right_pix_vote.ext_bw_bps;
+
+ camif_r_bw_updated = true;
+ }
+ } else
+ if (hw_mgr_res->hw_res[i]) {
+ CAM_ERR(CAM_ISP, "Invalid res_id %u",
+ hw_mgr_res->res_id);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf && hw_intf->hw_ops.process_cmd) {
+ bw_upd_args.node_res =
+ hw_mgr_res->hw_res[i];
+
+ bw_upd_args.camnoc_bw_bytes = cam_bw_bps;
+ bw_upd_args.external_bw_bytes = ext_bw_bps;
+
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_BW_UPDATE,
+ &bw_upd_args,
+ sizeof(struct cam_vfe_bw_update_args));
+ if (rc)
+ CAM_ERR(CAM_ISP, "BW Update failed");
+ } else
+ CAM_WARN(CAM_ISP, "NULL hw_intf!");
+ }
+ }
+
+ return rc;
+}
+
+/* entry function: config_hw */
+static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
+ void *config_hw_args)
+{
+ int rc = -1, i;
+ struct cam_hw_config_args *cfg;
+ struct cam_hw_update_entry *cmd;
+ struct cam_cdm_bl_request *cdm_cmd;
+ struct cam_ife_hw_mgr_ctx *ctx;
+ struct cam_isp_prepare_hw_update_data *hw_update_data;
+
+ CAM_DBG(CAM_ISP, "Enter");
+ if (!hw_mgr_priv || !config_hw_args) {
+ CAM_ERR(CAM_ISP, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ cfg = config_hw_args;
+ ctx = (struct cam_ife_hw_mgr_ctx *)cfg->ctxt_to_hw_map;
+ if (!ctx) {
+ CAM_ERR(CAM_ISP, "Invalid context is used");
+ return -EPERM;
+ }
+
+ if (!ctx->ctx_in_use || !ctx->cdm_cmd) {
+ CAM_ERR(CAM_ISP, "Invalid context parameters");
+ return -EPERM;
+ }
+ if (atomic_read(&ctx->overflow_pending))
+ return -EINVAL;
+
+ hw_update_data = (struct cam_isp_prepare_hw_update_data *) cfg->priv;
+
+ for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+ if (hw_update_data->bw_config_valid[i] == true) {
+ rc = cam_isp_blob_bw_update(
+ (struct cam_isp_bw_config *)
+ &hw_update_data->bw_config[i], ctx);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Bandwidth Update Failed");
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "Enter ctx id:%d num_hw_upd_entries %d",
+ ctx->ctx_index, cfg->num_hw_update_entries);
+
+ if (cfg->num_hw_update_entries > 0) {
+ cdm_cmd = ctx->cdm_cmd;
+ cdm_cmd->cmd_arrary_count = cfg->num_hw_update_entries;
+ cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+ cdm_cmd->flag = true;
+ cdm_cmd->userdata = ctx;
+ cdm_cmd->cookie = cfg->request_id;
+
+ for (i = 0 ; i <= cfg->num_hw_update_entries; i++) {
+ cmd = (cfg->hw_update_entries + i);
+ cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+ cdm_cmd->cmd[i].offset = cmd->offset;
+ cdm_cmd->cmd[i].len = cmd->len;
+ }
+
+ if (cfg->init_packet)
+ init_completion(&ctx->config_done_complete);
+
+ CAM_DBG(CAM_ISP, "Submit to CDM");
+ rc = cam_cdm_submit_bls(ctx->cdm_handle, cdm_cmd);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Failed to apply the configs");
+ return rc;
+ }
+
+ if (cfg->init_packet) {
+ rc = wait_for_completion_timeout(
+ &ctx->config_done_complete,
+ msecs_to_jiffies(30));
+ if (rc <= 0) {
+ CAM_ERR(CAM_ISP,
+ "config done completion timeout for req_id=%llu rc = %d",
+ cfg->request_id, rc);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ } else {
+ rc = 0;
+ CAM_DBG(CAM_ISP,
+ "config done Success for req_id=%llu",
+ cfg->request_id);
+ }
+ }
+ } else {
+ CAM_ERR(CAM_ISP, "No commands to config");
+ }
+ CAM_DBG(CAM_ISP, "Exit");
+
+ return rc;
+}
+
+static int cam_ife_mgr_stop_hw_in_overflow(void *stop_hw_args)
+{
+ int rc = 0;
+ struct cam_hw_stop_args *stop_args = stop_hw_args;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_ife_hw_mgr_ctx *ctx;
+ uint32_t i, master_base_idx = 0;
+
+ if (!stop_hw_args) {
+ CAM_ERR(CAM_ISP, "Invalid arguments");
+ return -EINVAL;
+ }
+ ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
+ if (!ctx || !ctx->ctx_in_use) {
+ CAM_ERR(CAM_ISP, "Invalid context is used");
+ return -EPERM;
+ }
+
+ CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
+ ctx->ctx_index);
+
+ if (!ctx->num_base) {
+ CAM_ERR(CAM_ISP, "Number of bases are zero");
+ return -EINVAL;
+ }
+
+ /* get master base index first */
+ for (i = 0; i < ctx->num_base; i++) {
+ if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
+ master_base_idx = ctx->base[i].idx;
+ break;
+ }
+ }
+
+ if (i == ctx->num_base)
+ master_base_idx = ctx->base[0].idx;
+
+
+ /* stop the master CIDs first */
+ cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+ master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+
+ /* stop rest of the CIDs */
+ for (i = 0; i < ctx->num_base; i++) {
+ if (i == master_base_idx)
+ continue;
+ cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+ ctx->base[i].idx, CAM_CSID_HALT_IMMEDIATELY);
+ }
+
+ /* stop the master CSID path first */
+ cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+ master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+
+ /* Stop rest of the CSID paths */
+ for (i = 0; i < ctx->num_base; i++) {
+ if (i == master_base_idx)
+ continue;
+
+ cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+ ctx->base[i].idx, CAM_CSID_HALT_IMMEDIATELY);
+ }
+
+ /* IFE mux in resources */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
+ }
+
+ /* IFE out resources */
+ for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+ cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
+
+
+ /* Stop tasklet for context */
+ cam_tasklet_stop(ctx->common.tasklet_info);
+ CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
+ ctx->ctx_index, rc);
+
+ return rc;
+}
+
+static int cam_ife_mgr_bw_control(struct cam_ife_hw_mgr_ctx *ctx,
+ enum cam_vfe_bw_control_action action)
+{
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_bw_control_args bw_ctrl_args;
+ int rc = -EINVAL;
+ uint32_t i;
+
+ CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);
+
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf && hw_intf->hw_ops.process_cmd) {
+ bw_ctrl_args.node_res =
+ hw_mgr_res->hw_res[i];
+ bw_ctrl_args.action = action;
+
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_BW_CONTROL,
+ &bw_ctrl_args,
+ sizeof(struct cam_vfe_bw_control_args));
+ if (rc)
+ CAM_ERR(CAM_ISP, "BW Update failed");
+ } else
+ CAM_WARN(CAM_ISP, "NULL hw_intf!");
+ }
+ }
+
+ return rc;
+}
+
/* Exclude this context's VFE resources from bandwidth voting (used on stop) */
static int cam_ife_mgr_pause_hw(struct cam_ife_hw_mgr_ctx *ctx)
{
	return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_EXCLUDE);
}
+
/* entry function: stop_hw
 *
 * Orderly stop of the whole IFE pipeline for one context:
 * IFE out -> IFE mux (src) -> tasklet -> CSID paths (master first) ->
 * CIDs (master first) -> BW pause.  Unless stop_only is set, it then
 * streams off the CDM, deinits the HW and, when the last active context
 * goes away, disables the SAFE LUT via SCM.
 *
 * The exact stop order is hardware-mandated; do not reorder.
 */
static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
{
	int rc = 0;
	struct cam_hw_stop_args *stop_args = stop_hw_args;
	struct cam_isp_stop_args *stop_isp;
	struct cam_ife_hw_mgr_res *hw_mgr_res;
	struct cam_ife_hw_mgr_ctx *ctx;
	enum cam_ife_csid_halt_cmd csid_halt_type;
	uint32_t i, master_base_idx = 0;

	if (!hw_mgr_priv || !stop_hw_args) {
		CAM_ERR(CAM_ISP, "Invalid arguments");
		return -EINVAL;
	}

	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
	if (!ctx || !ctx->ctx_in_use) {
		CAM_ERR(CAM_ISP, "Invalid context is used");
		return -EPERM;
	}

	CAM_DBG(CAM_ISP, " Enter...ctx id:%d", ctx->ctx_index);
	stop_isp = (struct cam_isp_stop_args *)stop_args->args;

	/* "stop only" (no deinit) is incompatible with an immediate halt */
	if ((stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_IMMEDIATELY) &&
		(stop_isp->stop_only)) {
		CAM_ERR(CAM_ISP, "Invalid params hw_stop_cmd:%d stop_only:%d",
			stop_isp->hw_stop_cmd, stop_isp->stop_only);
		return -EPERM;
	}

	/* Set the csid halt command */
	if (stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY)
		csid_halt_type = CAM_CSID_HALT_AT_FRAME_BOUNDARY;
	else
		csid_halt_type = CAM_CSID_HALT_IMMEDIATELY;

	/* Note:stop resource will remove the irq mask from the hardware */

	if (!ctx->num_base) {
		CAM_ERR(CAM_ISP, "number of bases are zero");
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP, "Halting CSIDs");

	CAM_DBG(CAM_ISP, "Going to stop IFE Out");

	/* IFE out resources */
	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
		cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
	/* get master base index first (left split owns the master role) */
	for (i = 0; i < ctx->num_base; i++) {
		if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
			master_base_idx = ctx->base[i].idx;
			break;
		}
	}

	CAM_DBG(CAM_ISP, "Going to stop IFE Mux");

	/* IFE mux in resources */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
	}

	cam_tasklet_stop(ctx->common.tasklet_info);

	/*
	 * If Context does not have PIX resources and has only RDI resource
	 * then take the first base index.
	 */
	if (i == ctx->num_base)
		master_base_idx = ctx->base[0].idx;
	CAM_DBG(CAM_ISP, "Stopping master CSID idx %d", master_base_idx);

	/* Stop the master CSID path first */
	/* NOTE(review): CSID paths are halted with a hard-coded
	 * CAM_CSID_HALT_AT_FRAME_BOUNDARY rather than csid_halt_type,
	 * which is only applied to the CIDs below — confirm intentional.
	 */
	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
		master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);

	/* stop rest of the CSID paths */
	for (i = 0; i < ctx->num_base; i++) {
		if (ctx->base[i].idx == master_base_idx)
			continue;
		CAM_DBG(CAM_ISP, "Stopping CSID idx %d i %d master %d",
			ctx->base[i].idx, i, master_base_idx);

		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
	}

	CAM_DBG(CAM_ISP, "Stopping master CID idx %d", master_base_idx);

	/* Stop the master CIDs first */
	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
		master_base_idx, csid_halt_type);

	/* stop rest of the CIDs */
	for (i = 0; i < ctx->num_base; i++) {
		if (ctx->base[i].idx == master_base_idx)
			continue;
		CAM_DBG(CAM_ISP, "Stopping CID idx %d i %d master %d",
			ctx->base[i].idx, i, master_base_idx);
		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
			ctx->base[i].idx, csid_halt_type);
	}

	/* remove this context's resources from bandwidth voting */
	cam_ife_mgr_pause_hw(ctx);

	if (stop_isp->stop_only)
		goto end;

	if (cam_cdm_stream_off(ctx->cdm_handle))
		CAM_ERR(CAM_ISP, "CDM stream off failed %d", ctx->cdm_handle);

	cam_ife_hw_mgr_deinit_hw(ctx);
	CAM_DBG(CAM_ISP,
		"Stop success for ctx id:%d rc :%d", ctx->ctx_index, rc);

	/* last active context out: disable the SAFE LUT via secure call */
	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
	if (!atomic_dec_return(&g_ife_hw_mgr.active_ctx_cnt)) {
		rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_DISABLE);
		if (rc) {
			CAM_ERR(CAM_ISP,
				"SAFE SCM call failed:Check TZ/HYP dependency");
			rc = 0;
		}
	}
	mutex_unlock(&g_ife_hw_mgr.ctx_mutex);

end:
	return rc;
}
+
+static int cam_ife_mgr_reset_vfe_hw(struct cam_ife_hw_mgr *hw_mgr,
+ uint32_t hw_idx)
+{
+ uint32_t i = 0;
+ struct cam_hw_intf *vfe_hw_intf;
+ uint32_t vfe_reset_type;
+
+ if (!hw_mgr) {
+ CAM_DBG(CAM_ISP, "Invalid arguments");
+ return -EINVAL;
+ }
+ /* Reset VFE HW*/
+ vfe_reset_type = CAM_VFE_HW_RESET_HW;
+
+ for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
+ if (hw_idx != hw_mgr->ife_devices[i]->hw_idx)
+ continue;
+ CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx);
+ vfe_hw_intf = hw_mgr->ife_devices[i];
+ vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv,
+ &vfe_reset_type, sizeof(vfe_reset_type));
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "Exit Successfully");
+ return 0;
+}
+
/*
 * cam_ife_mgr_restart_hw - restart a previously-stopped pipeline (used in
 * overflow recovery): tasklet, then IFE out, IFE mux (src) and CSID paths
 * in that order.  On any start failure the whole pipeline is torn down
 * again via cam_ife_mgr_stop_hw_in_overflow.
 *
 * The start order (out -> src -> csid) is the reverse of the stop order
 * and is hardware-mandated; do not reorder.
 */
static int cam_ife_mgr_restart_hw(void *start_hw_args)
{
	int rc = -1;
	struct cam_hw_start_args *start_args = start_hw_args;
	struct cam_ife_hw_mgr_ctx *ctx;
	struct cam_ife_hw_mgr_res *hw_mgr_res;
	uint32_t i;

	if (!start_hw_args) {
		CAM_ERR(CAM_ISP, "Invalid arguments");
		return -EINVAL;
	}

	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
	if (!ctx || !ctx->ctx_in_use) {
		CAM_ERR(CAM_ISP, "Invalid context is used");
		return -EPERM;
	}

	CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d", ctx->ctx_index);

	cam_tasklet_start(ctx->common.tasklet_info);

	/* start the IFE out devices */
	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
		rc = cam_ife_hw_mgr_start_hw_res(
			&ctx->res_list_ife_out[i], ctx);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)", i);
			goto err;
		}
	}

	CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d", ctx->ctx_index);
	/* Start the IFE mux in devices */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
				 hw_mgr_res->res_id);
			goto err;
		}
	}

	CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d", ctx->ctx_index);
	/* Start the IFE CSID HW devices */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
		if (rc) {
			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
				 hw_mgr_res->res_id);
			goto err;
		}
	}

	CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d", ctx->ctx_index);
	/* Start IFE root node: do nothing */
	CAM_DBG(CAM_ISP, "Exit...(success)");
	return 0;

err:
	/* roll back everything started so far with an immediate halt */
	cam_ife_mgr_stop_hw_in_overflow(start_hw_args);
	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
	return rc;
}
+
+static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
+{
+ int rc = -1;
+ struct cam_isp_start_args *start_isp = start_hw_args;
+ struct cam_hw_stop_args stop_args;
+ struct cam_isp_stop_args stop_isp;
+ struct cam_ife_hw_mgr_ctx *ctx;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_isp_resource_node *rsrc_node = NULL;
+ uint32_t i, camif_debug;
+
+ if (!hw_mgr_priv || !start_isp) {
+ CAM_ERR(CAM_ISP, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ ctx = (struct cam_ife_hw_mgr_ctx *)
+ start_isp->hw_config.ctxt_to_hw_map;
+ if (!ctx || !ctx->ctx_in_use) {
+ CAM_ERR(CAM_ISP, "Invalid context is used");
+ return -EPERM;
+ }
+
+ if ((!ctx->init_done) && start_isp->start_only) {
+ CAM_ERR(CAM_ISP, "Invalid args init_done %d start_only %d",
+ ctx->init_done, start_isp->start_only);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "Enter... ctx id:%d",
+ ctx->ctx_index);
+
+ /* update Bandwidth should be done at the hw layer */
+
+ cam_tasklet_start(ctx->common.tasklet_info);
+
+ if (ctx->init_done && start_isp->start_only)
+ goto start_only;
+
+ /* set current csid debug information to CSID HW */
+ for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+ if (g_ife_hw_mgr.csid_devices[i])
+ rc = g_ife_hw_mgr.csid_devices[i]->hw_ops.process_cmd(
+ g_ife_hw_mgr.csid_devices[i]->hw_priv,
+ CAM_IFE_CSID_SET_CSID_DEBUG,
+ &g_ife_hw_mgr.debug_cfg.csid_debug,
+ sizeof(g_ife_hw_mgr.debug_cfg.csid_debug));
+ }
+
+ camif_debug = g_ife_hw_mgr.debug_cfg.camif_debug;
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ rsrc_node = hw_mgr_res->hw_res[i];
+ if (rsrc_node->process_cmd && (rsrc_node->res_id ==
+ CAM_ISP_HW_VFE_IN_CAMIF)) {
+ rc = hw_mgr_res->hw_res[i]->process_cmd(
+ hw_mgr_res->hw_res[i],
+ CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
+ &camif_debug,
+ sizeof(camif_debug));
+ }
+ }
+ }
+
+ rc = cam_ife_hw_mgr_init_hw(ctx);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Init failed");
+ goto tasklet_stop;
+ }
+
+ ctx->init_done = true;
+
+ mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+ if (!atomic_fetch_inc(&g_ife_hw_mgr.active_ctx_cnt)) {
+ rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_ENABLE);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "SAFE SCM call failed:Check TZ/HYP dependency");
+ rc = -EFAULT;
+ goto deinit_hw;
+ }
+ }
+ mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+
+ CAM_DBG(CAM_ISP, "start cdm interface");
+ rc = cam_cdm_stream_on(ctx->cdm_handle);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not start cdm (%d)",
+ ctx->cdm_handle);
+ goto safe_disable;
+ }
+
+ /* Apply initial configuration */
+ CAM_DBG(CAM_ISP, "Config HW");
+ rc = cam_ife_mgr_config_hw(hw_mgr_priv, &start_isp->hw_config);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Config HW failed");
+ goto cdm_streamoff;
+ }
+
+start_only:
+
+ CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d",
+ ctx->ctx_index);
+ /* start the IFE out devices */
+ for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+ rc = cam_ife_hw_mgr_start_hw_res(
+ &ctx->res_list_ife_out[i], ctx);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)",
+ i);
+ goto err;
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d",
+ ctx->ctx_index);
+ /* Start the IFE mux in devices */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
+ hw_mgr_res->res_id);
+ goto err;
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d",
+ ctx->ctx_index);
+ /* Start the IFE CSID HW devices */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
+ hw_mgr_res->res_id);
+ goto err;
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d",
+ ctx->ctx_index);
+ /* Start the IFE CID HW devices */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
+ hw_mgr_res->res_id);
+ goto err;
+ }
+ }
+
+ /* Start IFE root node: do nothing */
+ CAM_DBG(CAM_ISP, "Start success for ctx id:%d", ctx->ctx_index);
+
+ return 0;
+
+err:
+ stop_isp.stop_only = false;
+ stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
+ stop_args.ctxt_to_hw_map = start_isp->hw_config.ctxt_to_hw_map;
+ stop_args.args = (void *)(&stop_isp);
+
+ cam_ife_mgr_stop_hw(hw_mgr_priv, &stop_args);
+ CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+ return rc;
+
+cdm_streamoff:
+ cam_cdm_stream_off(ctx->cdm_handle);
+
+safe_disable:
+ cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_DISABLE);
+
+deinit_hw:
+ cam_ife_hw_mgr_deinit_hw(ctx);
+ ctx->init_done = false;
+
+tasklet_stop:
+ cam_tasklet_stop(ctx->common.tasklet_info);
+
+ return rc;
+}
+
/* entry function: read — register reads are not supported by this manager */
static int cam_ife_mgr_read(void *hw_mgr_priv, void *read_args)
{
	return -EPERM;
}
+
/* entry function: write — register writes are not supported by this manager */
static int cam_ife_mgr_write(void *hw_mgr_priv, void *write_args)
{
	return -EPERM;
}
+
/*
 * cam_ife_mgr_release_hw - release every HW resource held by a context
 * and return the context to the free list.  Callers are expected to have
 * stopped the HW already; deinit is only run here if init_done is still
 * set.  The cleanup order (deinit -> release resources -> reset base ->
 * release CDM -> scrub context fields) must be preserved.
 */
static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
					void *release_hw_args)
{
	int rc = 0;
	struct cam_hw_release_args *release_args = release_hw_args;
	struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv;
	struct cam_ife_hw_mgr_ctx *ctx;
	uint32_t i;

	if (!hw_mgr_priv || !release_hw_args) {
		CAM_ERR(CAM_ISP, "Invalid arguments");
		return -EINVAL;
	}

	ctx = (struct cam_ife_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
	if (!ctx || !ctx->ctx_in_use) {
		CAM_ERR(CAM_ISP, "Invalid context is used");
		return -EPERM;
	}

	CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
		ctx->ctx_index);

	/* deinit only if a stop did not already do it */
	if (ctx->init_done)
		cam_ife_hw_mgr_deinit_hw(ctx);

	/* we should called the stop hw before this already */
	cam_ife_hw_mgr_release_hw_for_ctx(ctx);

	/* reset base info */
	ctx->num_base = 0;
	memset(ctx->base, 0, sizeof(ctx->base));

	/* release cdm handle */
	cam_cdm_release(ctx->cdm_handle);

	/* clean context: scrub all per-stream state before recycling */
	list_del_init(&ctx->list);
	ctx->ctx_in_use = 0;
	ctx->is_rdi_only_context = 0;
	ctx->cdm_handle = 0;
	ctx->cdm_ops = NULL;
	atomic_set(&ctx->overflow_pending, 0);
	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
		ctx->sof_cnt[i] = 0;
		ctx->eof_cnt[i] = 0;
		ctx->epoch_cnt[i] = 0;
	}
	CAM_DBG(CAM_ISP, "Exit...ctx id:%d",
		ctx->ctx_index);
	cam_ife_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx);
	return rc;
}
+
+static int cam_isp_blob_ubwc_update(
+ uint32_t blob_type,
+ struct cam_isp_generic_blob_info *blob_info,
+ struct cam_ubwc_config *ubwc_config,
+ struct cam_hw_prepare_update_args *prepare)
+{
+ struct cam_ubwc_plane_cfg_v1 *ubwc_plane_cfg;
+ struct cam_kmd_buf_info *kmd_buf_info;
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ uint32_t res_id_out, i;
+ uint32_t total_used_bytes = 0;
+ uint32_t kmd_buf_remain_size;
+ uint32_t *cmd_buf_addr;
+ uint32_t bytes_used = 0;
+ int num_ent, rc = 0;
+
+ ctx = prepare->ctxt_to_hw_map;
+ if (!ctx) {
+ CAM_ERR(CAM_ISP, "Invalid ctx");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if ((prepare->num_hw_update_entries + 1) >=
+ prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d max:%d",
+ prepare->num_hw_update_entries,
+ prepare->max_hw_update_entries);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ switch (ubwc_config->api_version) {
+ case CAM_UBWC_CFG_VERSION_1:
+ CAM_DBG(CAM_ISP, "num_ports= %d", ubwc_config->num_ports);
+
+ kmd_buf_info = blob_info->kmd_buf_info;
+ for (i = 0; i < ubwc_config->num_ports; i++) {
+ ubwc_plane_cfg = &ubwc_config->ubwc_plane_cfg[i][0];
+ res_id_out = ubwc_plane_cfg->port_type & 0xFF;
+
+ CAM_DBG(CAM_ISP, "UBWC config idx %d, port_type=%d", i,
+ ubwc_plane_cfg->port_type);
+
+ if (res_id_out >= CAM_IFE_HW_OUT_RES_MAX) {
+ CAM_ERR(CAM_ISP, "Invalid port type:%x",
+ ubwc_plane_cfg->port_type);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if ((kmd_buf_info->used_bytes
+ + total_used_bytes) < kmd_buf_info->size) {
+ kmd_buf_remain_size = kmd_buf_info->size -
+ (kmd_buf_info->used_bytes
+ + total_used_bytes);
+ } else {
+ CAM_ERR(CAM_ISP,
+ "no free kmd memory for base=%d bytes_used=%u buf_size=%u",
+ blob_info->base_info->idx, bytes_used,
+ kmd_buf_info->size);
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ cmd_buf_addr = kmd_buf_info->cpu_addr +
+ kmd_buf_info->used_bytes/4 +
+ total_used_bytes/4;
+ hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
+
+ if (!hw_mgr_res) {
+ CAM_ERR(CAM_ISP, "Invalid hw_mgr_res");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = cam_isp_add_cmd_buf_update(
+ hw_mgr_res, blob_type,
+ blob_type_hw_cmd_map[blob_type],
+ blob_info->base_info->idx,
+ (void *)cmd_buf_addr,
+ kmd_buf_remain_size,
+ (void *)ubwc_plane_cfg,
+ &bytes_used);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP,
+ "Failed cmd_update, base_idx=%d, bytes_used=%u, res_id_out=0x%x",
+ blob_info->base_info->idx,
+ bytes_used,
+ res_id_out);
+ goto end;
+ }
+
+ total_used_bytes += bytes_used;
+ }
+
+ if (total_used_bytes) {
+ /* Update the HW entries */
+ num_ent = prepare->num_hw_update_entries;
+ prepare->hw_update_entries[num_ent].handle =
+ kmd_buf_info->handle;
+ prepare->hw_update_entries[num_ent].len =
+ total_used_bytes;
+ prepare->hw_update_entries[num_ent].offset =
+ kmd_buf_info->offset;
+ num_ent++;
+
+ kmd_buf_info->used_bytes += total_used_bytes;
+ kmd_buf_info->offset += total_used_bytes;
+ prepare->num_hw_update_entries = num_ent;
+ }
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Invalid UBWC API Version %d",
+ ubwc_config->api_version);
+ rc = -EINVAL;
+ break;
+ }
+end:
+ return rc;
+}
+
+static int cam_isp_blob_hfr_update(
+ uint32_t blob_type,
+ struct cam_isp_generic_blob_info *blob_info,
+ struct cam_isp_resource_hfr_config *hfr_config,
+ struct cam_hw_prepare_update_args *prepare)
+{
+ struct cam_isp_port_hfr_config *port_hfr_config;
+ struct cam_kmd_buf_info *kmd_buf_info;
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ uint32_t res_id_out, i;
+ uint32_t total_used_bytes = 0;
+ uint32_t kmd_buf_remain_size;
+ uint32_t *cmd_buf_addr;
+ uint32_t bytes_used = 0;
+ int num_ent, rc = 0;
+
+ ctx = prepare->ctxt_to_hw_map;
+ CAM_DBG(CAM_ISP, "num_ports= %d",
+ hfr_config->num_ports);
+
+ /* Max one hw entries required for hfr config update */
+ if (prepare->num_hw_update_entries + 1 >=
+ prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ prepare->num_hw_update_entries,
+ prepare->max_hw_update_entries);
+ return -EINVAL;
+ }
+
+ kmd_buf_info = blob_info->kmd_buf_info;
+ for (i = 0; i < hfr_config->num_ports; i++) {
+ port_hfr_config = &hfr_config->port_hfr_config[i];
+ res_id_out = port_hfr_config->resource_type & 0xFF;
+
+ CAM_DBG(CAM_ISP, "hfr config idx %d, type=%d", i,
+ res_id_out);
+
+ if (res_id_out >= CAM_IFE_HW_OUT_RES_MAX) {
+ CAM_ERR(CAM_ISP, "invalid out restype:%x",
+ port_hfr_config->resource_type);
+ return -EINVAL;
+ }
+
+ if ((kmd_buf_info->used_bytes
+ + total_used_bytes) < kmd_buf_info->size) {
+ kmd_buf_remain_size = kmd_buf_info->size -
+ (kmd_buf_info->used_bytes +
+ total_used_bytes);
+ } else {
+ CAM_ERR(CAM_ISP,
+ "no free kmd memory for base %d",
+ blob_info->base_info->idx);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ cmd_buf_addr = kmd_buf_info->cpu_addr +
+ kmd_buf_info->used_bytes/4 +
+ total_used_bytes/4;
+ hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
+
+ rc = cam_isp_add_cmd_buf_update(
+ hw_mgr_res, blob_type,
+ blob_type_hw_cmd_map[blob_type],
+ blob_info->base_info->idx,
+ (void *)cmd_buf_addr,
+ kmd_buf_remain_size,
+ (void *)port_hfr_config,
+ &bytes_used);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP,
+ "Failed cmd_update, base_idx=%d, rc=%d",
+ blob_info->base_info->idx, bytes_used);
+ return rc;
+ }
+
+ total_used_bytes += bytes_used;
+ }
+
+ if (total_used_bytes) {
+ /* Update the HW entries */
+ num_ent = prepare->num_hw_update_entries;
+ prepare->hw_update_entries[num_ent].handle =
+ kmd_buf_info->handle;
+ prepare->hw_update_entries[num_ent].len = total_used_bytes;
+ prepare->hw_update_entries[num_ent].offset =
+ kmd_buf_info->offset;
+ num_ent++;
+
+ kmd_buf_info->used_bytes += total_used_bytes;
+ kmd_buf_info->offset += total_used_bytes;
+ prepare->num_hw_update_entries = num_ent;
+ }
+
+ return rc;
+}
+
+static int cam_isp_blob_csid_clock_update(
+ uint32_t blob_type,
+ struct cam_isp_generic_blob_info *blob_info,
+ struct cam_isp_csid_clock_config *clock_config,
+ struct cam_hw_prepare_update_args *prepare)
+{
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_ife_csid_clock_update_args csid_clock_upd_args;
+ uint64_t clk_rate = 0;
+ int rc = -EINVAL;
+ uint32_t i;
+
+ ctx = prepare->ctxt_to_hw_map;
+
+ CAM_DBG(CAM_ISP,
+ "csid clk=%llu", clock_config->csid_clock);
+
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ clk_rate = 0;
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+ clk_rate = clock_config->csid_clock;
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf && hw_intf->hw_ops.process_cmd) {
+ csid_clock_upd_args.clk_rate = clk_rate;
+ CAM_DBG(CAM_ISP, "i= %d clk=%llu\n",
+ i, csid_clock_upd_args.clk_rate);
+
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ blob_type_hw_cmd_map[blob_type],
+ &csid_clock_upd_args,
+ sizeof(
+ struct cam_ife_csid_clock_update_args));
+ if (rc)
+ CAM_ERR(CAM_ISP, "Clock Update failed");
+ } else
+ CAM_ERR(CAM_ISP, "NULL hw_intf!");
+ }
+ }
+
+ return rc;
+}
+
/*
 * cam_isp_blob_clock_update - push the blob's VFE clock rates to every
 * valid IFE source resource: CAMIF/CAMIF_LITE get the left/right pix
 * rate (each side applied at most once), RDI inputs get the max of all
 * per-RDI rates, anything else is rejected with -EINVAL.  Returns the
 * last process_cmd result.
 */
static int cam_isp_blob_clock_update(
	uint32_t blob_type,
	struct cam_isp_generic_blob_info *blob_info,
	struct cam_isp_clock_config *clock_config,
	struct cam_hw_prepare_update_args *prepare)
{
	struct cam_ife_hw_mgr_ctx *ctx = NULL;
	struct cam_ife_hw_mgr_res *hw_mgr_res;
	struct cam_hw_intf *hw_intf;
	struct cam_vfe_clock_update_args clock_upd_args;
	uint64_t clk_rate = 0;
	int rc = -EINVAL;
	uint32_t i;
	uint32_t j;
	bool camif_l_clk_updated = false;
	bool camif_r_clk_updated = false;

	ctx = prepare->ctxt_to_hw_map;

	CAM_DBG(CAM_PERF,
		"usage=%u left_clk= %lu right_clk=%lu",
		clock_config->usage_type,
		clock_config->left_pix_hz,
		clock_config->right_pix_hz);

	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			clk_rate = 0;
			if (!hw_mgr_res->hw_res[i])
				continue;

			if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
				/* apply each pix split at most once */
				if (i == CAM_ISP_HW_SPLIT_LEFT) {
					if (camif_l_clk_updated)
						continue;

					clk_rate =
						clock_config->left_pix_hz;

					camif_l_clk_updated = true;
				} else {
					if (camif_r_clk_updated)
						continue;

					clk_rate =
						clock_config->right_pix_hz;

					camif_r_clk_updated = true;
				}
			} else if (hw_mgr_res->res_id ==
				CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
				if (i == CAM_ISP_HW_SPLIT_LEFT) {
					if (camif_l_clk_updated)
						continue;

					clk_rate =
						clock_config->left_pix_hz;

					camif_l_clk_updated = true;
				} else {
					if (camif_r_clk_updated)
						continue;

					clk_rate =
						clock_config->right_pix_hz;

					camif_r_clk_updated = true;
				}
			} else if ((hw_mgr_res->res_id >=
				CAM_ISP_HW_VFE_IN_RDI0) && (hw_mgr_res->res_id
				<= CAM_ISP_HW_VFE_IN_RDI3))
				/* every RDI gets the max requested RDI rate */
				for (j = 0; j < clock_config->num_rdi; j++)
					clk_rate = max(clock_config->rdi_hz[j],
						clk_rate);
			else
				/* unreachable-NULL guard: hw_res[i] is
				 * non-NULL here (checked at loop top)
				 */
				if (hw_mgr_res->hw_res[i]) {
					CAM_ERR(CAM_ISP, "Invalid res_id %u",
						hw_mgr_res->res_id);
					rc = -EINVAL;
					return rc;
				}

			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
			if (hw_intf && hw_intf->hw_ops.process_cmd) {
				clock_upd_args.node_res =
					hw_mgr_res->hw_res[i];
				CAM_DBG(CAM_ISP,
				"res_id=%u i= %d clk=%llu\n",
				hw_mgr_res->res_id, i, clk_rate);

				clock_upd_args.clk_rate = clk_rate;

				rc = hw_intf->hw_ops.process_cmd(
					hw_intf->hw_priv,
					CAM_ISP_HW_CMD_CLOCK_UPDATE,
					&clock_upd_args,
					sizeof(
					struct cam_vfe_clock_update_args));
				if (rc)
					CAM_ERR(CAM_ISP, "Clock Update failed");
			} else
				CAM_WARN(CAM_ISP, "NULL hw_intf!");
		}
	}

	return rc;
}
+
+/*
+ * cam_isp_packet_generic_blob_handler - dispatch one generic cmd-buffer blob.
+ *
+ * @user_data: struct cam_isp_generic_blob_info for the current prepare pass
+ * @blob_type: one of CAM_ISP_GENERIC_BLOB_TYPE_* (validated against MAX)
+ * @blob_size: size in bytes of @blob_data (must be non-zero)
+ * @blob_data: raw blob payload, reinterpreted per @blob_type
+ *
+ * Routes HFR / clock / BW / UBWC / CSID-clock configuration blobs to their
+ * respective update helpers. BW config is not applied here: it is only
+ * copied into prepare->priv and flagged valid for later use.
+ * Returns 0 on success or a negative errno on validation/update failure.
+ */
+static int cam_isp_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	int rc = 0;
+	struct cam_isp_generic_blob_info *blob_info = user_data;
+	struct cam_hw_prepare_update_args *prepare = NULL;
+
+	/* NOTE: prepare is still NULL here; the %pK below always prints NULL */
+	if (!blob_data || (blob_size == 0) || !blob_info) {
+		CAM_ERR(CAM_ISP, "Invalid info blob %pK %d prepare %pK",
+			blob_data, blob_size, prepare);
+		return -EINVAL;
+	}
+
+	if (blob_type >= CAM_ISP_GENERIC_BLOB_TYPE_MAX) {
+		CAM_ERR(CAM_ISP, "Invalid Blob Type %d Max %d", blob_type,
+			CAM_ISP_GENERIC_BLOB_TYPE_MAX);
+		return -EINVAL;
+	}
+
+	prepare = blob_info->prepare;
+	if (!prepare) {
+		CAM_ERR(CAM_ISP, "Failed. prepare is NULL, blob_type %d",
+			blob_type);
+		return -EINVAL;
+	}
+
+	switch (blob_type) {
+	case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: {
+		struct cam_isp_resource_hfr_config *hfr_config =
+			(struct cam_isp_resource_hfr_config *)blob_data;
+
+		rc = cam_isp_blob_hfr_update(blob_type, blob_info,
+			hfr_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "HFR Update Failed");
+	}
+	break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: {
+		struct cam_isp_clock_config *clock_config =
+			(struct cam_isp_clock_config *)blob_data;
+
+		rc = cam_isp_blob_clock_update(blob_type, blob_info,
+			clock_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Clock Update Failed");
+	}
+	break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: {
+		struct cam_isp_bw_config *bw_config =
+			(struct cam_isp_bw_config *)blob_data;
+		struct cam_isp_prepare_hw_update_data *prepare_hw_data;
+
+		/* usage_type indexes bw_config[]; bound-check before memcpy */
+		if (!prepare || !prepare->priv ||
+			(bw_config->usage_type >= CAM_IFE_HW_NUM_MAX)) {
+			CAM_ERR(CAM_ISP, "Invalid inputs");
+			rc = -EINVAL;
+			break;
+		}
+
+		prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
+			prepare->priv;
+
+		/* cache the BW vote; consumer applies it after prepare */
+		memcpy(&prepare_hw_data->bw_config[bw_config->usage_type],
+			bw_config, sizeof(prepare_hw_data->bw_config[0]));
+		prepare_hw_data->bw_config_valid[bw_config->usage_type] = true;
+
+	}
+	break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG: {
+		struct cam_ubwc_config *ubwc_config =
+			(struct cam_ubwc_config *)blob_data;
+
+		rc = cam_isp_blob_ubwc_update(blob_type, blob_info,
+			ubwc_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "UBWC Update Failed rc: %d", rc);
+	}
+	break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG: {
+		struct cam_isp_csid_clock_config *clock_config =
+			(struct cam_isp_csid_clock_config *)blob_data;
+
+		rc = cam_isp_blob_csid_clock_update(blob_type, blob_info,
+			clock_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Clock Update Failed");
+	}
+	break;
+	default:
+		CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_prepare_hw_update - build the HW update command list for one
+ * request packet.
+ *
+ * @hw_mgr_priv:            struct cam_ife_hw_mgr of this manager
+ * @prepare_hw_update_args: struct cam_hw_prepare_update_args from the ISP ctx
+ *
+ * Validates and pre-parses the packet (KMD buffer, patches), then for every
+ * base (per-VFE) in the context adds change-base, command-buffer and IO-buffer
+ * entries. For INIT packets reg-update is deferred; for UPDATE packets a
+ * change-base + reg-update pair is appended per base.
+ * Returns 0 on success or a negative errno.
+ */
+static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int rc = 0;
+	struct cam_hw_prepare_update_args *prepare =
+		(struct cam_hw_prepare_update_args *) prepare_hw_update_args;
+	struct cam_ife_hw_mgr_ctx *ctx;
+	struct cam_ife_hw_mgr *hw_mgr;
+	struct cam_kmd_buf_info kmd_buf;
+	uint32_t i;
+	bool fill_fence = true;
+	struct cam_isp_prepare_hw_update_data *prepare_hw_data;
+
+	if (!hw_mgr_priv || !prepare_hw_update_args) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return -EINVAL;
+	}
+
+	/* NOTE(review): prepare->packet and prepare->priv are dereferenced
+	 * below without NULL checks — presumed guaranteed by the caller.
+	 */
+	CAM_DBG(CAM_REQ, "Enter for req_id %lld",
+		prepare->packet->header.request_id);
+
+	prepare_hw_data = (struct cam_isp_prepare_hw_update_data *)
+		prepare->priv;
+
+	ctx = (struct cam_ife_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
+	hw_mgr = (struct cam_ife_hw_mgr *)hw_mgr_priv;
+
+	rc = cam_packet_util_validate_packet(prepare->packet);
+	if (rc)
+		return rc;
+
+	/* Pre parse the packet*/
+	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
+	if (rc)
+		return rc;
+
+	rc = cam_packet_util_process_patches(prepare->packet,
+		hw_mgr->mgr_common.cmd_iommu_hdl,
+		hw_mgr->mgr_common.cmd_iommu_hdl_secure);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Patch ISP packet failed.");
+		return rc;
+	}
+
+	prepare->num_hw_update_entries = 0;
+	prepare->num_in_map_entries = 0;
+	prepare->num_out_map_entries = 0;
+
+	/* Clear stale BW votes; the BW blob handler repopulates these */
+	memset(&prepare_hw_data->bw_config[0], 0x0,
+		sizeof(prepare_hw_data->bw_config[0]) *
+		CAM_IFE_HW_NUM_MAX);
+	memset(&prepare_hw_data->bw_config_valid[0], 0x0,
+		sizeof(prepare_hw_data->bw_config_valid[0]) *
+		CAM_IFE_HW_NUM_MAX);
+
+	for (i = 0; i < ctx->num_base; i++) {
+		CAM_DBG(CAM_ISP, "process cmd buffer for device %d", i);
+
+		/* Add change base */
+		rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed in change base i=%d, idx=%d, rc=%d",
+				i, ctx->base[i].idx, rc);
+			goto end;
+		}
+
+
+		/* get command buffers */
+		if (ctx->base[i].split_id != CAM_ISP_HW_SPLIT_MAX) {
+			rc = cam_isp_add_command_buffers(prepare, &kmd_buf,
+				&ctx->base[i],
+				cam_isp_packet_generic_blob_handler,
+				ctx->res_list_ife_out, CAM_IFE_HW_OUT_RES_MAX);
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d",
+					i, ctx->base[i].split_id, rc);
+				goto end;
+			}
+		}
+
+		/* get IO buffers */
+		rc = cam_isp_add_io_buffers(hw_mgr->mgr_common.img_iommu_hdl,
+			hw_mgr->mgr_common.img_iommu_hdl_secure,
+			prepare, ctx->base[i].idx,
+			&kmd_buf, ctx->res_list_ife_out,
+			CAM_IFE_HW_OUT_RES_MAX, fill_fence);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed in io buffers, i=%d, rc=%d",
+				i, rc);
+			goto end;
+		}
+
+		/* fence map table entries need to fill only once in the loop */
+		if (fill_fence)
+			fill_fence = false;
+	}
+
+	/*
+	 * reg update will be done later for the initial configure.
+	 * need to plus one to the op_code and only take the lower
+	 * bits to get the type of operation since UMD definition
+	 * of op_code has some difference from KMD.
+	 */
+	if (((prepare->packet->header.op_code + 1) & 0xF) ==
+		CAM_ISP_PACKET_INIT_DEV) {
+		prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_INIT_DEV;
+		goto end;
+	} else
+		prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_UPDATE_DEV;
+
+	/* add reg update commands */
+	for (i = 0; i < ctx->num_base; i++) {
+		/* Add change base */
+		rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed in change base adding reg_update cmd i=%d, idx=%d, rc=%d",
+				i, ctx->base[i].idx, rc);
+			goto end;
+		}
+
+		/*Add reg update */
+		rc = cam_isp_add_reg_update(prepare, &ctx->res_list_ife_src,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Add Reg_update cmd Failed i=%d, idx=%d, rc=%d",
+				i, ctx->base[i].idx, rc);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+/* Resume a paused context by re-including its bandwidth vote. */
+static int cam_ife_mgr_resume_hw(struct cam_ife_hw_mgr_ctx *ctx)
+{
+	int rc;
+
+	rc = cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_INCLUDE);
+
+	return rc;
+}
+
+/*
+ * Enable or disable SOF IRQ debug on every CSID path resource and on each
+ * CAMIF input resource owned by this context. Error codes from individual
+ * process_cmd calls are OR-ed together into the return value.
+ */
+static int cam_ife_mgr_sof_irq_debug(
+	struct cam_ife_hw_mgr_ctx *ctx,
+	uint32_t sof_irq_enable)
+{
+	int rc = 0;
+	uint32_t split;
+	struct cam_ife_hw_mgr_res *res = NULL;
+	struct cam_hw_intf *intf = NULL;
+	struct cam_isp_resource_node *node = NULL;
+
+	/* Pass the debug setting down to every CSID HW instance */
+	list_for_each_entry(res, &ctx->res_list_ife_csid, list) {
+		for (split = 0; split < CAM_ISP_HW_SPLIT_MAX; split++) {
+			node = res->hw_res[split];
+			if (!node)
+				continue;
+
+			intf = node->hw_intf;
+			if (!intf->hw_ops.process_cmd)
+				continue;
+
+			rc |= intf->hw_ops.process_cmd(
+				intf->hw_priv,
+				CAM_IFE_CSID_SOF_IRQ_DEBUG,
+				&sof_irq_enable,
+				sizeof(sof_irq_enable));
+		}
+	}
+
+	/* Same setting for each CAMIF input resource on the VFE side */
+	list_for_each_entry(res, &ctx->res_list_ife_src, list) {
+		for (split = 0; split < CAM_ISP_HW_SPLIT_MAX; split++) {
+			node = res->hw_res[split];
+			if (!node)
+				continue;
+
+			if ((node->res_id == CAM_ISP_HW_VFE_IN_CAMIF) &&
+				node->process_cmd)
+				rc |= node->process_cmd(
+					node,
+					CAM_ISP_HW_CMD_SOF_IRQ_DEBUG,
+					&sof_irq_enable,
+					sizeof(sof_irq_enable));
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_print_io_bufs - dump every IO buffer config of @packet after a
+ * page fault.
+ *
+ * @packet:      command packet whose io_configs are walked
+ * @iommu_hdl:   IOMMU handle for non-secure buffers
+ * @sec_mmu_hdl: IOMMU handle for secure buffers
+ * @pf_buf_info: mem handle reported by the page-fault handler; a buffer is
+ *               flagged as the faulting one when its FD matches this FD
+ * @mem_found:   optional out-flag, set true when the faulting buffer belongs
+ *               to this packet
+ *
+ * Logging-only helper: local rc is used for per-buffer flow control and is
+ * not returned.
+ */
+static void cam_ife_mgr_print_io_bufs(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+	bool *mem_found)
+{
+	uint64_t iova_addr;
+	size_t src_buf_size;
+	int i;
+	int j;
+	int rc = 0;
+	int32_t mmu_hdl;
+
+	struct cam_buf_io_cfg *io_cfg = NULL;
+
+	if (mem_found)
+		*mem_found = false;
+
+	/* io_configs_offset is in bytes; payload is addressed as uint32_t */
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+
+	for (i = 0; i < packet->num_io_configs; i++) {
+		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+			/* planes are packed; first empty handle ends the port */
+			if (!io_cfg[i].mem_handle[j])
+				break;
+
+			if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+				GET_FD_FROM_HANDLE(pf_buf_info)) {
+				CAM_INFO(CAM_ISP,
+					"Found PF at port: 0x%x mem 0x%x fd: 0x%x",
+					io_cfg[i].resource_type,
+					io_cfg[i].mem_handle[j],
+					pf_buf_info);
+				if (mem_found)
+					*mem_found = true;
+			}
+
+			CAM_INFO(CAM_ISP, "port: 0x%x f: %u format: %d dir %d",
+				io_cfg[i].resource_type,
+				io_cfg[i].fence,
+				io_cfg[i].format,
+				io_cfg[i].direction);
+
+			/* pick the IOMMU domain matching buffer security */
+			mmu_hdl = cam_mem_is_secure_buf(
+				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+				iommu_hdl;
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+				mmu_hdl, &iova_addr, &src_buf_size);
+			if (rc < 0) {
+				CAM_ERR(CAM_ISP,
+					"get src buf address fail mem_handle 0x%x",
+					io_cfg[i].mem_handle[j]);
+				continue;
+			}
+			/* addresses above 32 bits cannot be printed below */
+			if (iova_addr >> 32) {
+				CAM_ERR(CAM_ISP, "Invalid mapped address");
+				rc = -EINVAL;
+				continue;
+			}
+
+			CAM_INFO(CAM_ISP,
+				"pln %d w %d h %d s 0x%x addr 0x%x end_addr 0x%x offset %x memh %x",
+				j, io_cfg[i].planes[j].width,
+				io_cfg[i].planes[j].height,
+				(unsigned int)src_buf_size,
+				(unsigned int)iova_addr,
+				(unsigned int)iova_addr +
+				(unsigned int)src_buf_size,
+				io_cfg[i].offsets[j],
+				io_cfg[i].mem_handle[j]);
+		}
+	}
+}
+
+/*
+ * cam_ife_mgr_cmd - dispatch a generic HW-manager command for a context.
+ *
+ * @hw_mgr_priv: struct cam_ife_hw_mgr
+ * @cmd_args:    struct cam_hw_cmd_args; ctxt_to_hw_map identifies the context
+ *
+ * Handles CAM_HW_MGR_CMD_INTERNAL sub-commands (RDI-only query, pause,
+ * resume, SOF debug) and CAM_HW_MGR_CMD_DUMP_PF_INFO.
+ * Returns 0 on success, -EINVAL on bad arguments/commands, -EPERM on an
+ * unused context.
+ *
+ * Fixes vs. previous version:
+ *  - ctx was initialized from hw_cmd_args->ctxt_to_hw_map BEFORE the
+ *    NULL check on cmd_args, dereferencing a NULL pointer when cmd_args
+ *    was NULL; the dereference now happens only after validation.
+ *  - the inner default case logged hw_cmd_args->cmd_type (always
+ *    CAM_HW_MGR_CMD_INTERNAL) instead of the offending internal cmd_type.
+ *  - the outer default case logged an error but returned 0; it now
+ *    returns -EINVAL.
+ */
+static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_ife_hw_mgr_ctx *ctx = NULL;
+	struct cam_isp_hw_cmd_args *isp_hw_cmd_args = NULL;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_ife_hw_mgr_ctx *) hw_cmd_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used");
+		return -EPERM;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_HW_MGR_CMD_INTERNAL:
+		if (!hw_cmd_args->u.internal_args) {
+			CAM_ERR(CAM_ISP, "Invalid cmd arguments");
+			return -EINVAL;
+		}
+
+		isp_hw_cmd_args = (struct cam_isp_hw_cmd_args *)
+			hw_cmd_args->u.internal_args;
+
+		switch (isp_hw_cmd_args->cmd_type) {
+		case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT:
+			if (ctx->is_rdi_only_context)
+				isp_hw_cmd_args->u.is_rdi_only_context = 1;
+			else
+				isp_hw_cmd_args->u.is_rdi_only_context = 0;
+			break;
+		case CAM_ISP_HW_MGR_CMD_PAUSE_HW:
+			cam_ife_mgr_pause_hw(ctx);
+			break;
+		case CAM_ISP_HW_MGR_CMD_RESUME_HW:
+			cam_ife_mgr_resume_hw(ctx);
+			break;
+		case CAM_ISP_HW_MGR_CMD_SOF_DEBUG:
+			cam_ife_mgr_sof_irq_debug(ctx,
+				isp_hw_cmd_args->u.sof_irq_enable);
+			break;
+		default:
+			/* report the internal sub-command that was invalid */
+			CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
+				isp_hw_cmd_args->cmd_type);
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+		cam_ife_mgr_print_io_bufs(
+			hw_cmd_args->u.pf_args.pf_data.packet,
+			hw_mgr->mgr_common.img_iommu_hdl,
+			hw_mgr->mgr_common.img_iommu_hdl_secure,
+			hw_cmd_args->u.pf_args.buf_info,
+			hw_cmd_args->u.pf_args.mem_found);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Invalid cmd");
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_cmd_get_sof_timestamp - read the SOF timestamp from the
+ * context's CSID.
+ *
+ * @ife_ctx:         context whose CSID resources are queried
+ * @time_stamp:      out, hardware SOF timestamp
+ * @boot_time_stamp: out, boot-clock timestamp captured at the same SOF
+ *
+ * Queries only the first usable left-split CSID resource (left is master in
+ * dual-VFE and holds the RDI in RDI-only contexts) and exits via the goto
+ * after the first process_cmd attempt, successful or not.
+ * Returns 0 on success, -EINVAL when no resource answered.
+ */
+static int cam_ife_mgr_cmd_get_sof_timestamp(
+	struct cam_ife_hw_mgr_ctx *ife_ctx,
+	uint64_t *time_stamp,
+	uint64_t *boot_time_stamp)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+	struct cam_ife_hw_mgr_res *hw_mgr_res;
+	struct cam_hw_intf *hw_intf;
+	struct cam_csid_get_time_stamp_args csid_get_time;
+
+	list_for_each_entry(hw_mgr_res, &ife_ctx->res_list_ife_csid, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i] ||
+				(i == CAM_ISP_HW_SPLIT_RIGHT))
+				continue;
+			/*
+			 * Get the SOF time stamp from left resource only.
+			 * Left resource is master for dual vfe case and
+			 * Rdi only context case left resource only hold
+			 * the RDI resource
+			 */
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf->hw_ops.process_cmd) {
+				csid_get_time.node_res =
+					hw_mgr_res->hw_res[i];
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_IFE_CSID_CMD_GET_TIME_STAMP,
+					&csid_get_time,
+					sizeof(
+					struct cam_csid_get_time_stamp_args));
+				if (!rc) {
+					*time_stamp =
+						csid_get_time.time_stamp_val;
+					*boot_time_stamp =
+						csid_get_time.boot_timestamp;
+				}
+			/*
+			 * Single VFE case, Get the time stamp from available
+			 * one csid hw in the context
+			 * Dual VFE case, get the time stamp from master(left)
+			 * would be sufficient
+			 */
+				goto end;
+			}
+		}
+	}
+end:
+	if (rc)
+		CAM_ERR(CAM_ISP, "Getting sof time stamp failed");
+
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_process_recovery_cb - work-queue callback performing overflow
+ * recovery.
+ *
+ * @priv: struct cam_ife_hw_mgr that queued the work
+ * @data: struct cam_hw_event_recovery_data allocated by
+ *        cam_ife_hw_mgr_do_error_recovery; this function owns it and frees
+ *        it on every exit path
+ *
+ * Sequence for overflow errors: stop all affected contexts, reset their
+ * CSID paths, reset each affected VFE core, then restart the contexts.
+ * Returns 0 on success or the first failing step's error code.
+ *
+ * Fix vs. previous version: the early "return rc" statements on
+ * stop/reset/restart failure leaked @data; all exits now funnel through
+ * the kfree at the end.
+ */
+static int cam_ife_mgr_process_recovery_cb(void *priv, void *data)
+{
+	int32_t rc = 0;
+	struct cam_hw_event_recovery_data *recovery_data = data;
+	struct cam_hw_start_args start_args;
+	struct cam_hw_stop_args stop_args;
+	struct cam_ife_hw_mgr *ife_hw_mgr = priv;
+	struct cam_ife_hw_mgr_res *hw_mgr_res;
+	uint32_t i = 0;
+
+	uint32_t error_type = recovery_data->error_type;
+	struct cam_ife_hw_mgr_ctx *ctx = NULL;
+
+	/* Here recovery is performed */
+	CAM_DBG(CAM_ISP, "ErrorType = %d", error_type);
+
+	switch (error_type) {
+	case CAM_ISP_HW_ERROR_OVERFLOW:
+	case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
+		if (!recovery_data->affected_ctx[0]) {
+			CAM_ERR(CAM_ISP,
+				"No context is affected but recovery called");
+			goto end;
+		}
+		/* stop resources here */
+		CAM_DBG(CAM_ISP, "STOP: Number of affected context: %d",
+			recovery_data->no_of_context);
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			stop_args.ctxt_to_hw_map =
+				recovery_data->affected_ctx[i];
+			rc = cam_ife_mgr_stop_hw_in_overflow(&stop_args);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "CTX stop failed(%d)", rc);
+				goto end;
+			}
+		}
+
+		CAM_DBG(CAM_ISP, "RESET: CSID PATH");
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			ctx = recovery_data->affected_ctx[i];
+			list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid,
+				list) {
+				rc = cam_ife_hw_mgr_reset_csid_res(hw_mgr_res);
+				if (rc) {
+					CAM_ERR(CAM_ISP, "Failed RESET (%d)",
+						hw_mgr_res->res_id);
+					goto end;
+				}
+			}
+		}
+
+		CAM_DBG(CAM_ISP, "RESET: Calling VFE reset");
+
+		for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
+			if (recovery_data->affected_core[i])
+				cam_ife_mgr_reset_vfe_hw(ife_hw_mgr, i);
+		}
+
+		CAM_DBG(CAM_ISP, "START: Number of affected context: %d",
+			recovery_data->no_of_context);
+
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			ctx = recovery_data->affected_ctx[i];
+			start_args.ctxt_to_hw_map = ctx;
+
+			atomic_set(&ctx->overflow_pending, 0);
+
+			rc = cam_ife_mgr_restart_hw(&start_args);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "CTX start failed(%d)", rc);
+				goto end;
+			}
+			CAM_DBG(CAM_ISP, "Started resources rc (%d)", rc);
+		}
+		CAM_DBG(CAM_ISP, "Recovery Done rc (%d)", rc);
+
+		break;
+
+	case CAM_ISP_HW_ERROR_P2I_ERROR:
+		break;
+
+	case CAM_ISP_HW_ERROR_VIOLATION:
+		break;
+
+	default:
+		CAM_ERR(CAM_ISP, "Invalid Error");
+	}
+	CAM_DBG(CAM_ISP, "Exit: ErrorType = %d", error_type);
+
+end:
+	kfree(recovery_data);
+	return rc;
+}
+
+/*
+ * Queue deferred error recovery. The caller's descriptor may be temporary,
+ * so it is duplicated into an atomic allocation that the work-queue
+ * callback (cam_ife_mgr_process_recovery_cb) consumes and frees.
+ */
+static int cam_ife_hw_mgr_do_error_recovery(
+	struct cam_hw_event_recovery_data *ife_mgr_recovery_data)
+{
+	struct cam_hw_event_recovery_data *payload;
+	struct crm_workq_task *task;
+
+	payload = kzalloc(sizeof(*payload), GFP_ATOMIC);
+	if (!payload)
+		return -ENOMEM;
+
+	memcpy(payload, ife_mgr_recovery_data, sizeof(*payload));
+
+	CAM_DBG(CAM_ISP, "Enter: error_type (%d)", payload->error_type);
+
+	task = cam_req_mgr_workq_get_task(g_ife_hw_mgr.workq);
+	if (!task) {
+		CAM_ERR(CAM_ISP, "No empty task frame");
+		kfree(payload);
+		return -ENOMEM;
+	}
+
+	task->process_cb = &cam_ife_mgr_process_recovery_cb;
+	task->payload = payload;
+
+	return cam_req_mgr_workq_enqueue_task(task,
+		payload->affected_ctx[0]->hw_mgr,
+		CRM_TASK_PRIORITY_0);
+}
+
+/*
+ * Check whether this context runs on any core currently marked in
+ * affected_core[]. If it does, every other core used by the context is
+ * also marked affected, so recovery covers the whole (dual-VFE) group.
+ * Returns 1 when the context is affected, 0 otherwise (including on
+ * invalid parameters, matching the legacy behavior).
+ */
+static int cam_ife_hw_mgr_is_ctx_affected(
+	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx,
+	uint32_t *affected_core, uint32_t size)
+{
+	int32_t is_affected = 0;
+	uint32_t base_idx;
+	uint32_t pending_cnt = 0;
+	uint32_t num_base = ife_hwr_mgr_ctx->num_base;
+	uint32_t pending_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
+
+	CAM_DBG(CAM_ISP, "max_idx = %d", num_base);
+
+	if ((num_base >= CAM_IFE_HW_NUM_MAX) ||
+		(size > CAM_IFE_HW_NUM_MAX)) {
+		CAM_ERR(CAM_ISP, "invalid parameter = %d", num_base);
+		return is_affected;
+	}
+
+	/* Split the context's cores into "already affected" vs "pending" */
+	for (base_idx = 0; base_idx < num_base; base_idx++) {
+		uint32_t core = ife_hwr_mgr_ctx->base[base_idx].idx;
+
+		if (affected_core[core]) {
+			is_affected = 1;
+		} else {
+			pending_core_idx[pending_cnt] = core;
+			CAM_DBG(CAM_ISP, "Add affected IFE %d for recovery",
+				pending_core_idx[pending_cnt]);
+			pending_cnt++;
+		}
+	}
+
+	/* Affected context: propagate the mark to its remaining cores */
+	if (is_affected == 1) {
+		while (pending_cnt) {
+			pending_cnt--;
+			affected_core[pending_core_idx[pending_cnt]] = 1;
+		}
+	}
+
+	return is_affected;
+}
+
+/*
+ * For any dual VFE context, if non-affected VFE is also serving
+ * another context, then that context should also be notified with fatal error
+ * So Loop through each context and -
+ * a. match core_idx
+ * b. Notify CTX with fatal error
+ *
+ * @curr_ife_hwr_mgr_ctx: context on which the error was first seen
+ * @error_event_data:     event payload forwarded to each affected context's
+ *                        error callback
+ * @curr_core_idx:        VFE core that raised the error
+ * @recovery_data:        out; filled with the affected contexts and the
+ *                        final affected-core bitmap
+ *
+ * Returns 0 on success, -EINVAL when @recovery_data is NULL.
+ */
+static int cam_ife_hw_mgr_find_affected_ctx(
+	struct cam_ife_hw_mgr_ctx *curr_ife_hwr_mgr_ctx,
+	struct cam_isp_hw_error_event_data *error_event_data,
+	uint32_t curr_core_idx,
+	struct cam_hw_event_recovery_data *recovery_data)
+{
+	uint32_t affected_core[CAM_IFE_HW_NUM_MAX] = {0};
+	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = NULL;
+	cam_hw_event_cb_func notify_err_cb;
+	struct cam_ife_hw_mgr *ife_hwr_mgr = NULL;
+	enum cam_isp_hw_event_type event_type = CAM_ISP_HW_EVENT_ERROR;
+	uint32_t i = 0;
+
+	if (!recovery_data) {
+		CAM_ERR(CAM_ISP, "recovery_data parameter is NULL");
+		return -EINVAL;
+	}
+
+	recovery_data->no_of_context = 0;
+	affected_core[curr_core_idx] = 1;
+	ife_hwr_mgr = curr_ife_hwr_mgr_ctx->hw_mgr;
+
+	list_for_each_entry(ife_hwr_mgr_ctx,
+		&ife_hwr_mgr->used_ctx_list, list) {
+		/*
+		 * Check if current core_idx matches the HW associated
+		 * with this context
+		 */
+		if (!cam_ife_hw_mgr_is_ctx_affected(ife_hwr_mgr_ctx,
+			affected_core, CAM_IFE_HW_NUM_MAX))
+			continue;
+
+		atomic_set(&ife_hwr_mgr_ctx->overflow_pending, 1);
+		notify_err_cb = ife_hwr_mgr_ctx->common.event_cb[event_type];
+
+		/* Add affected_context in list of recovery data */
+		CAM_DBG(CAM_ISP, "Add affected ctx %d to list",
+			ife_hwr_mgr_ctx->ctx_index);
+		if (recovery_data->no_of_context < CAM_CTX_MAX)
+			recovery_data->affected_ctx[
+				recovery_data->no_of_context++] =
+				ife_hwr_mgr_ctx;
+
+		/*
+		 * In the call back function corresponding ISP context
+		 * will update CRM about fatal Error
+		 * NOTE(review): assumes the error event_cb is registered
+		 * for every in-use context — verify at context acquire.
+		 */
+		notify_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_ERROR, error_event_data);
+	}
+
+	/* fill the affected_core in recovery data */
+	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		recovery_data->affected_core[i] = affected_core[i];
+		CAM_DBG(CAM_ISP, "Vfe core %d is affected (%d)",
+			i, recovery_data->affected_core[i]);
+	}
+
+	return 0;
+}
+
+/*
+ * cam_ife_hw_mgr_get_err_type - classify a VFE top IRQ error payload.
+ *
+ * @handler_priv: struct cam_ife_hw_mgr_ctx that owns the resources
+ * @payload:      struct cam_vfe_top_irq_evt_payload from the IRQ bottom half
+ *
+ * Walks the context's CAMIF input resources and invokes the bottom-half
+ * handler on whichever VFE (left and/or right) matches the payload's core
+ * index. Returns the first non-zero status reported by a handler
+ * (an error type), 0 when no handler reported an error, or IRQ_HANDLED
+ * when the payload is missing.
+ */
+static int cam_ife_hw_mgr_get_err_type(
+	void *handler_priv,
+	void *payload)
+{
+	struct cam_isp_resource_node *hw_res_l = NULL;
+	struct cam_isp_resource_node *hw_res_r = NULL;
+	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload *evt_payload;
+	struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
+	uint32_t status = 0;
+	uint32_t core_idx;
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+
+	if (!evt_payload) {
+		CAM_ERR(CAM_ISP, "No payload");
+		return IRQ_HANDLED;
+	}
+
+	core_idx = evt_payload->core_index;
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
+
+	list_for_each_entry(isp_ife_camif_res,
+		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+		/* only initialized CAMIF inputs participate in error check */
+		if ((isp_ife_camif_res->res_type ==
+			CAM_IFE_HW_MGR_RES_UNINIT) ||
+			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
+			continue;
+
+		hw_res_l = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
+		hw_res_r = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];
+
+		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d\n",
+			isp_ife_camif_res->is_dual_vfe);
+
+		/* ERROR check for Left VFE */
+		if (!hw_res_l) {
+			CAM_DBG(CAM_ISP, "VFE(L) Device is NULL");
+			break;
+		}
+
+		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
+			hw_res_l->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_l->hw_intf->hw_idx) {
+			status = hw_res_l->bottom_half_handler(
+				hw_res_l, evt_payload);
+		}
+
+		if (status)
+			break;
+
+		/* ERROR check for Right VFE */
+		if (!hw_res_r) {
+			CAM_DBG(CAM_ISP, "VFE(R) Device is NULL");
+			continue;
+		}
+		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
+			hw_res_r->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_r->hw_intf->hw_idx) {
+			status = hw_res_r->bottom_half_handler(
+				hw_res_r, evt_payload);
+		}
+
+		if (status)
+			break;
+	}
+	CAM_DBG(CAM_ISP, "Exit (status = %d)!", status);
+	return status;
+}
+
+/*
+ * cam_ife_hw_mgr_handle_camif_error - handle a CAMIF error IRQ for a context.
+ *
+ * @handler_priv: struct cam_ife_hw_mgr_ctx
+ * @payload:      struct cam_vfe_top_irq_evt_payload
+ *
+ * Classifies the error via cam_ife_hw_mgr_get_err_type, notifies every
+ * context sharing the affected cores, and (when the enable_recovery debug
+ * flag is set) queues overflow recovery.
+ * Returns -EFAULT for real HW errors, 0 when no error was found, or the
+ * raw error status when an overflow is already pending on this context.
+ */
+static int cam_ife_hw_mgr_handle_camif_error(
+	void *handler_priv,
+	void *payload)
+{
+	int32_t error_status;
+	uint32_t core_idx;
+	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload *evt_payload;
+	struct cam_isp_hw_error_event_data error_event_data = {0};
+	struct cam_hw_event_recovery_data recovery_data = {0};
+	int rc = 0;
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	core_idx = evt_payload->core_index;
+
+	error_status = cam_ife_hw_mgr_get_err_type(ife_hwr_mgr_ctx,
+		evt_payload);
+
+	/* a recovery is already in flight for this context; do not restack */
+	if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+		return error_status;
+
+	switch (error_status) {
+	case CAM_ISP_HW_ERROR_OVERFLOW:
+	case CAM_ISP_HW_ERROR_P2I_ERROR:
+	case CAM_ISP_HW_ERROR_VIOLATION:
+		CAM_ERR(CAM_ISP, "Enter: error_type (%d)", error_status);
+		rc = -EFAULT;
+
+		if (g_ife_hw_mgr.debug_cfg.enable_recovery)
+			error_event_data.recovery_enabled = true;
+
+		/* all three error classes are reported upward as OVERFLOW */
+		error_event_data.error_type =
+			CAM_ISP_HW_ERROR_OVERFLOW;
+
+		cam_ife_hw_mgr_find_affected_ctx(ife_hwr_mgr_ctx,
+			&error_event_data,
+			core_idx,
+			&recovery_data);
+
+		if (!g_ife_hw_mgr.debug_cfg.enable_recovery) {
+			CAM_DBG(CAM_ISP, "recovery is not enabled");
+			break;
+		}
+
+		CAM_DBG(CAM_ISP, "IFE Mgr recovery is enabled");
+		/* Trigger for recovery */
+		recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
+		cam_ife_hw_mgr_do_error_recovery(&recovery_data);
+		break;
+	default:
+		CAM_DBG(CAM_ISP, "No error (%d)", error_status);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * DUAL VFE is valid for PIX processing path
+ * This function assumes hw_res[0] is master in case
+ * of dual VFE.
+ * RDI path does not support DUAl VFE
+ *
+ * @handler_priv: struct cam_ife_hw_mgr_ctx
+ * @payload:      struct cam_vfe_top_irq_evt_payload
+ *
+ * Runs the reg-update bottom-half handler for each matching input resource
+ * and, when the ACK is complete (slave first, then master for dual VFE),
+ * delivers CAM_ISP_HW_EVENT_REG_UPDATE to the context callback. For RDI
+ * resources the event is only sent in RDI-only contexts. Always returns 0.
+ */
+static int cam_ife_hw_mgr_handle_reg_update(
+	void *handler_priv,
+	void *payload)
+{
+	struct cam_isp_resource_node *hw_res;
+	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload *evt_payload;
+	struct cam_ife_hw_mgr_res *ife_src_res = NULL;
+	cam_hw_event_cb_func ife_hwr_irq_rup_cb;
+	struct cam_isp_hw_reg_update_event_data rup_event_data;
+	uint32_t core_idx;
+	/* NOTE(review): uint32_t initialized with -EINVAL; the !rup_status
+	 * checks still work, but the type is misleading — consider int32_t.
+	 */
+	uint32_t rup_status = -EINVAL;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+
+	if (!handler_priv || !payload) {
+		CAM_ERR(CAM_ISP, "Invalid Parameter");
+		return -EPERM;
+	}
+
+	core_idx = evt_payload->core_index;
+	ife_hwr_irq_rup_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
+	list_for_each_entry(ife_src_res,
+		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		CAM_DBG(CAM_ISP, "resource id = %d, curr_core_idx = %d",
+			ife_src_res->res_id, core_idx);
+		switch (ife_src_res->res_id) {
+		case CAM_ISP_HW_VFE_IN_CAMIF_LITE:
+			break;
+		case CAM_ISP_HW_VFE_IN_CAMIF:
+			if (ife_src_res->is_dual_vfe)
+				/* It checks for slave core RUP ACK*/
+				hw_res = ife_src_res->hw_res[1];
+			else
+				hw_res = ife_src_res->hw_res[0];
+
+			if (!hw_res) {
+				CAM_ERR(CAM_ISP, "CAMIF device is NULL");
+				break;
+			}
+			CAM_DBG(CAM_ISP,
+				"current_core_id = %d , core_idx res = %d",
+				core_idx, hw_res->hw_intf->hw_idx);
+
+			if (core_idx == hw_res->hw_intf->hw_idx) {
+				rup_status = hw_res->bottom_half_handler(
+					hw_res, evt_payload);
+			}
+
+			/* master (hw_res[0]) ACK is consumed but its status
+			 * is intentionally not folded into rup_status
+			 */
+			if (ife_src_res->is_dual_vfe) {
+				hw_res = ife_src_res->hw_res[0];
+				if (core_idx == hw_res->hw_intf->hw_idx) {
+					hw_res->bottom_half_handler(
+						hw_res, evt_payload);
+				}
+			}
+
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+
+			if (!rup_status) {
+				ife_hwr_irq_rup_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_REG_UPDATE,
+					&rup_event_data);
+			}
+			break;
+
+		case CAM_ISP_HW_VFE_IN_RDI0:
+		case CAM_ISP_HW_VFE_IN_RDI1:
+		case CAM_ISP_HW_VFE_IN_RDI2:
+		case CAM_ISP_HW_VFE_IN_RDI3:
+			hw_res = ife_src_res->hw_res[0];
+
+			if (!hw_res) {
+				CAM_ERR(CAM_ISP, "RDI Device is NULL");
+				break;
+			}
+
+			if (core_idx == hw_res->hw_intf->hw_idx)
+				rup_status = hw_res->bottom_half_handler(
+					hw_res, evt_payload);
+
+			/* RDI RUP only drives the event in RDI-only ctx */
+			if (!ife_hwr_mgr_ctx->is_rdi_only_context)
+				continue;
+
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+			if (!rup_status) {
+				/* Send the Reg update hw event */
+				ife_hwr_irq_rup_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_REG_UPDATE,
+					&rup_event_data);
+			}
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid resource id (%d)",
+				ife_src_res->res_id);
+		}
+
+	}
+
+	if (!rup_status)
+		CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
+
+	return 0;
+}
+
+/*
+ * cam_ife_hw_mgr_check_irq_for_dual_vfe - decide whether a dual-VFE event
+ * is complete.
+ *
+ * @ife_hw_mgr_ctx: context holding the per-core event counters
+ * @core_idx0:      index of one VFE core (typically master/left)
+ * @core_idx1:      index of the other VFE core
+ * @hw_event_type:  CAM_ISP_HW_EVENT_SOF / EPOCH / EOF
+ *
+ * An event is complete only when both cores have reported it:
+ *   counts equal        -> reset both counters, return 0 (deliver event)
+ *   counts differ by 1  -> still waiting for the other core, return -1
+ *   counts differ by >1 -> one VFE stalled, log rate-limited error, return -1
+ * Unknown event types return 0 (treated as complete).
+ *
+ * Fix vs. previous version: the stall check used uint32_t subtraction
+ * ("cnt0 - cnt1 > 1"), which wraps to a huge value when cnt1 == cnt0 + 1
+ * and falsely flagged a stalled VFE on the normal one-core-ahead case
+ * (unsigned wraparound, CERT INT30-C). Ordered comparisons avoid the wrap.
+ */
+static int cam_ife_hw_mgr_check_irq_for_dual_vfe(
+	struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx,
+	uint32_t core_idx0,
+	uint32_t core_idx1,
+	uint32_t hw_event_type)
+{
+	int32_t rc = -1;
+	uint32_t *event_cnt = NULL;
+
+	switch (hw_event_type) {
+	case CAM_ISP_HW_EVENT_SOF:
+		event_cnt = ife_hw_mgr_ctx->sof_cnt;
+		break;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		event_cnt = ife_hw_mgr_ctx->epoch_cnt;
+		break;
+	case CAM_ISP_HW_EVENT_EOF:
+		event_cnt = ife_hw_mgr_ctx->eof_cnt;
+		break;
+	default:
+		return 0;
+	}
+
+	if (event_cnt[core_idx0] ==
+		event_cnt[core_idx1]) {
+
+		event_cnt[core_idx0] = 0;
+		event_cnt[core_idx1] = 0;
+
+		rc = 0;
+		return rc;
+	}
+
+	if ((event_cnt[core_idx0] > event_cnt[core_idx1] + 1) ||
+		(event_cnt[core_idx1] > event_cnt[core_idx0] + 1)) {
+
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"One of the VFE could not generate hw event %d",
+			hw_event_type);
+		rc = -1;
+		return rc;
+	}
+
+	CAM_DBG(CAM_ISP, "Only one core_index has given hw event %d",
+		hw_event_type);
+
+	return rc;
+}
+
+/*
+ * cam_ife_hw_mgr_handle_epoch_for_camif_hw_res - handle an EPOCH IRQ on the
+ * CAMIF path.
+ *
+ * @handler_priv: struct cam_ife_hw_mgr_ctx
+ * @payload:      struct cam_vfe_top_irq_evt_payload
+ *
+ * Single VFE: forwards the EPOCH event to the context callback when the
+ * bottom-half handler succeeds. Dual VFE: counts per-core EPOCHs and only
+ * fires the callback once both cores reported (checked via
+ * cam_ife_hw_mgr_check_irq_for_dual_vfe). Always returns 0.
+ */
+static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
+	void *handler_priv,
+	void *payload)
+{
+	int32_t rc = -EINVAL;
+	struct cam_isp_resource_node *hw_res_l;
+	struct cam_isp_resource_node *hw_res_r;
+	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload *evt_payload;
+	struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
+	cam_hw_event_cb_func ife_hwr_irq_epoch_cb;
+	struct cam_isp_hw_epoch_event_data epoch_done_event_data;
+	uint32_t core_idx;
+	uint32_t epoch_status = -EINVAL;
+	uint32_t core_index0;
+	uint32_t core_index1;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	ife_hwr_irq_epoch_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EPOCH];
+	core_idx = evt_payload->core_index;
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_EPOCH;
+
+	list_for_each_entry(isp_ife_camif_res,
+		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+		if ((isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			|| (isp_ife_camif_res->res_id !=
+			CAM_ISP_HW_VFE_IN_CAMIF))
+			continue;
+
+		hw_res_l = isp_ife_camif_res->hw_res[0];
+		hw_res_r = isp_ife_camif_res->hw_res[1];
+
+		switch (isp_ife_camif_res->is_dual_vfe) {
+		/* Handling Single VFE Scenario */
+		case 0:
+			/* EPOCH check for Left side VFE */
+			if (!hw_res_l) {
+				CAM_ERR(CAM_ISP, "Left Device is NULL");
+				break;
+			}
+
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				epoch_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+				if (atomic_read(
+					&ife_hwr_mgr_ctx->overflow_pending))
+					break;
+				if (!epoch_status)
+					ife_hwr_irq_epoch_cb(
+						ife_hwr_mgr_ctx->common.cb_priv,
+						CAM_ISP_HW_EVENT_EPOCH,
+						&epoch_done_event_data);
+			}
+
+			break;
+
+		/* Handling Dual VFE Scenario */
+		case 1:
+			/* SOF check for Left side VFE (Master)*/
+
+			if ((!hw_res_l) || (!hw_res_r)) {
+				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
+				break;
+			}
+			/* core_idx equals this core's hw_idx, so the count
+			 * below is effectively per reporting core
+			 */
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				epoch_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+
+				if (!epoch_status)
+					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
+				else
+					break;
+			}
+
+			/* SOF check for Right side VFE */
+			if (core_idx == hw_res_r->hw_intf->hw_idx) {
+				epoch_status = hw_res_r->bottom_half_handler(
+					hw_res_r, evt_payload);
+
+				if (!epoch_status)
+					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
+				else
+					break;
+			}
+
+			core_index0 = hw_res_l->hw_intf->hw_idx;
+			core_index1 = hw_res_r->hw_intf->hw_idx;
+
+			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
+				ife_hwr_mgr_ctx,
+				core_index0,
+				core_index1,
+				evt_payload->evt_id);
+
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+			if (!rc)
+				ife_hwr_irq_epoch_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_EPOCH,
+					&epoch_done_event_data);
+
+			break;
+
+		/* Error */
+		default:
+			CAM_ERR(CAM_ISP, "error with hw_res");
+
+		}
+	}
+
+	if (!epoch_status)
+		CAM_DBG(CAM_ISP, "Exit epoch_status = %d", epoch_status);
+
+	return 0;
+}
+
+/*
+ * cam_ife_hw_mgr_process_camif_sof - process a SOF IRQ for one CAMIF input
+ * resource.
+ *
+ * @isp_ife_camif_res: the CAMIF resource that may own the reporting core
+ * @ife_hwr_mgr_ctx:   context holding the dual-VFE SOF counters
+ * @evt_payload:       IRQ payload identifying the reporting core
+ *
+ * Single VFE: returns 0 as soon as the matching core's bottom-half handler
+ * succeeds. Dual VFE: counts per-core SOFs and returns 0 only when both
+ * cores have reported (via cam_ife_hw_mgr_check_irq_for_dual_vfe).
+ * Returns -EINVAL otherwise (event not yet deliverable).
+ */
+static int cam_ife_hw_mgr_process_camif_sof(
+	struct cam_ife_hw_mgr_res *isp_ife_camif_res,
+	struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx,
+	struct cam_vfe_top_irq_evt_payload *evt_payload)
+{
+	struct cam_isp_resource_node *hw_res_l = NULL;
+	struct cam_isp_resource_node *hw_res_r = NULL;
+	int32_t rc = -EINVAL;
+	uint32_t core_idx;
+	uint32_t sof_status = 0;
+	uint32_t core_index0;
+	uint32_t core_index1;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	core_idx = evt_payload->core_index;
+	hw_res_l = isp_ife_camif_res->hw_res[0];
+	hw_res_r = isp_ife_camif_res->hw_res[1];
+	CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
+		isp_ife_camif_res->is_dual_vfe);
+
+	switch (isp_ife_camif_res->is_dual_vfe) {
+	/* Handling Single VFE Scenario */
+	case 0:
+		/* SOF check for Left side VFE */
+		if (!hw_res_l) {
+			CAM_ERR(CAM_ISP, "VFE Device is NULL");
+			break;
+		}
+		CAM_DBG(CAM_ISP, "curr_core_idx = %d,core idx hw = %d",
+			core_idx, hw_res_l->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_l->hw_intf->hw_idx) {
+			sof_status = hw_res_l->bottom_half_handler(hw_res_l,
+				evt_payload);
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+			if (!sof_status)
+				rc = 0;
+		}
+
+		break;
+
+	/* Handling Dual VFE Scenario */
+	case 1:
+		/* SOF check for Left side VFE */
+
+		if (!hw_res_l) {
+			CAM_ERR(CAM_ISP, "VFE Device is NULL");
+			break;
+		}
+		CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx= %d",
+			core_idx,
+			hw_res_l->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_l->hw_intf->hw_idx) {
+			sof_status = hw_res_l->bottom_half_handler(
+				hw_res_l, evt_payload);
+			if (!sof_status)
+				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+			else
+				break;
+		}
+
+		/* SOF check for Right side VFE */
+		if (!hw_res_r) {
+			CAM_ERR(CAM_ISP, "VFE Device is NULL");
+			break;
+		}
+		CAM_DBG(CAM_ISP, "curr_core_idx = %d, ews hw idx= %d",
+			core_idx,
+			hw_res_r->hw_intf->hw_idx);
+		if (core_idx == hw_res_r->hw_intf->hw_idx) {
+			sof_status = hw_res_r->bottom_half_handler(hw_res_r,
+				evt_payload);
+			if (!sof_status)
+				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+			else
+				break;
+		}
+
+		core_index0 = hw_res_l->hw_intf->hw_idx;
+		core_index1 = hw_res_r->hw_intf->hw_idx;
+
+		if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+			break;
+
+		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hwr_mgr_ctx,
+			core_index0, core_index1, evt_payload->evt_id);
+
+		break;
+
+	default:
+		CAM_ERR(CAM_ISP, "error with hw_res");
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)", sof_status);
+
+	return rc;
+}
+
+/*
+ * cam_ife_hw_mgr_handle_sof - top-level SOF IRQ handler for a context.
+ *
+ * @handler_priv: struct cam_ife_hw_mgr_ctx
+ * @payload:      struct cam_vfe_top_irq_evt_payload
+ *
+ * For RDI inputs, the SOF event is delivered only in RDI-only contexts;
+ * for CAMIF inputs it is delivered once cam_ife_hw_mgr_process_camif_sof
+ * reports completion. The sof_sent flag guarantees at most one SOF event
+ * per IRQ even when several resources report. Returns 0 (or IRQ_HANDLED
+ * when the payload is missing).
+ */
+static int cam_ife_hw_mgr_handle_sof(
+	void *handler_priv,
+	void *payload)
+{
+	struct cam_isp_resource_node *hw_res = NULL;
+	struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload *evt_payload;
+	struct cam_ife_hw_mgr_res *ife_src_res = NULL;
+	cam_hw_event_cb_func ife_hw_irq_sof_cb;
+	struct cam_isp_hw_sof_event_data sof_done_event_data;
+	uint32_t sof_status = 0;
+	bool sof_sent = false;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hw_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	if (!evt_payload) {
+		CAM_ERR(CAM_ISP, "no payload");
+		return IRQ_HANDLED;
+	}
+	ife_hw_irq_sof_cb =
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
+
+	list_for_each_entry(ife_src_res,
+		&ife_hw_mgr_ctx->res_list_ife_src, list) {
+
+		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		switch (ife_src_res->res_id) {
+		case CAM_ISP_HW_VFE_IN_RDI0:
+		case CAM_ISP_HW_VFE_IN_RDI1:
+		case CAM_ISP_HW_VFE_IN_RDI2:
+		case CAM_ISP_HW_VFE_IN_RDI3:
+			/* RDI never runs dual-VFE; hw_res[0] is the only one */
+			hw_res = ife_src_res->hw_res[0];
+			sof_status = hw_res->bottom_half_handler(
+				hw_res, evt_payload);
+
+			/* check if it is rdi only context */
+			if (ife_hw_mgr_ctx->is_rdi_only_context) {
+				if (!sof_status && !sof_sent) {
+					cam_ife_mgr_cmd_get_sof_timestamp(
+						ife_hw_mgr_ctx,
+						&sof_done_event_data.timestamp,
+						&sof_done_event_data.boot_time);
+
+					ife_hw_irq_sof_cb(
+						ife_hw_mgr_ctx->common.cb_priv,
+						CAM_ISP_HW_EVENT_SOF,
+						&sof_done_event_data);
+					CAM_DBG(CAM_ISP, "sof_status = %d",
+						sof_status);
+
+					sof_sent = true;
+				}
+
+			}
+			break;
+
+		case CAM_ISP_HW_VFE_IN_CAMIF:
+			sof_status = cam_ife_hw_mgr_process_camif_sof(
+				ife_src_res, ife_hw_mgr_ctx, evt_payload);
+			if (!sof_status && !sof_sent) {
+				cam_ife_mgr_cmd_get_sof_timestamp(
+					ife_hw_mgr_ctx,
+					&sof_done_event_data.timestamp,
+					&sof_done_event_data.boot_time);
+
+				ife_hw_irq_sof_cb(
+					ife_hw_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_SOF,
+					&sof_done_event_data);
+				CAM_DBG(CAM_ISP, "sof_status = %d",
+					sof_status);
+
+				sof_sent = true;
+			}
+			break;
+		case CAM_ISP_HW_VFE_IN_CAMIF_LITE:
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid resource id :%d",
+				ife_src_res->res_id);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * cam_ife_hw_mgr_handle_eof_for_camif_hw_res - tasklet-context EOF handler.
+ *
+ * Processes an EOF (end of frame) IRQ payload for every CAMIF input resource
+ * of the context.  For single VFE the ISP context is notified as soon as the
+ * owning core's bottom half reports success; for dual VFE the per-core
+ * eof_cnt counters are incremented and the notification is only sent once
+ * cam_ife_hw_mgr_check_irq_for_dual_vfe() confirms both cores have seen EOF.
+ *
+ * Consistency fix: the raw pr_err() calls were replaced with the CAM_ERR
+ * logging macro used everywhere else in this file.
+ *
+ * @handler_priv: struct cam_ife_hw_mgr_ctx for this context
+ * @payload:      struct cam_vfe_top_irq_evt_payload from the IRQ top half
+ *
+ * Returns 0; returns IRQ_HANDLED early when no payload is supplied.
+ */
+static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	int32_t rc = -EINVAL;
+	struct cam_isp_resource_node         *hw_res_l = NULL;
+	struct cam_isp_resource_node         *hw_res_r = NULL;
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
+	cam_hw_event_cb_func                  ife_hwr_irq_eof_cb;
+	struct cam_isp_hw_eof_event_data      eof_done_event_data;
+	uint32_t  core_idx;
+	uint32_t  eof_status = 0;
+	uint32_t  core_index0;
+	uint32_t  core_index1;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	if (!evt_payload) {
+		CAM_ERR(CAM_ISP, "no payload");
+		return IRQ_HANDLED;
+	}
+	core_idx = evt_payload->core_index;
+	ife_hwr_irq_eof_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EOF];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_EOF;
+
+	list_for_each_entry(isp_ife_camif_res,
+		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+		/* only CAMIF input resources generate EOF notifications */
+		if ((isp_ife_camif_res->res_type ==
+			CAM_IFE_HW_MGR_RES_UNINIT) ||
+			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
+			continue;
+
+		hw_res_l = isp_ife_camif_res->hw_res[0];
+		hw_res_r = isp_ife_camif_res->hw_res[1];
+
+		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
+			isp_ife_camif_res->is_dual_vfe);
+		switch (isp_ife_camif_res->is_dual_vfe) {
+		/* Handling Single VFE Scenario */
+		case 0:
+			/* EOF check for Left side VFE */
+			if (!hw_res_l) {
+				CAM_ERR(CAM_ISP, "VFE Device is NULL");
+				break;
+			}
+			CAM_DBG(CAM_ISP, "curr_core_idx = %d, core idx hw = %d",
+				core_idx, hw_res_l->hw_intf->hw_idx);
+
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				eof_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+				/* pending overflow: suppress notification */
+				if (atomic_read(
+					&ife_hwr_mgr_ctx->overflow_pending))
+					break;
+				if (!eof_status)
+					ife_hwr_irq_eof_cb(
+						ife_hwr_mgr_ctx->common.cb_priv,
+						CAM_ISP_HW_EVENT_EOF,
+						&eof_done_event_data);
+			}
+
+			break;
+		/* Handling dual VFE Scenario */
+		case 1:
+			if ((!hw_res_l) || (!hw_res_r)) {
+				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
+				break;
+			}
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				eof_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+
+				if (!eof_status)
+					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
+				else
+					break;
+			}
+
+			/* EOF check for Right side VFE */
+			if (core_idx == hw_res_r->hw_intf->hw_idx) {
+				eof_status = hw_res_r->bottom_half_handler(
+					hw_res_r, evt_payload);
+
+				if (!eof_status)
+					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
+				else
+					break;
+			}
+
+			core_index0 = hw_res_l->hw_intf->hw_idx;
+			core_index1 = hw_res_r->hw_intf->hw_idx;
+
+			/* rc == 0 only when both cores have reported EOF */
+			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
+				ife_hwr_mgr_ctx,
+				core_index0,
+				core_index1,
+				evt_payload->evt_id);
+
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+
+			if (!rc)
+				ife_hwr_irq_eof_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_EOF,
+					&eof_done_event_data);
+
+			break;
+
+		default:
+			CAM_ERR(CAM_ISP, "error with hw_res");
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Exit (eof_status = %d)", eof_status);
+
+	return 0;
+}
+
+
+/*
+ * cam_ife_hw_mgr_handle_buf_done_for_hw_res - tasklet-context buf-done
+ * handler.
+ *
+ * Iterates every IFE output resource of the context, runs the bus bottom
+ * half for the core that raised the IRQ, and either notifies the ISP
+ * context of a successful buffer done or escalates composite/overwrite
+ * errors through cam_ife_hw_mgr_find_affected_ctx() as a bus overflow.
+ *
+ * @handler_priv: unused; context is taken from the payload
+ * @payload:      struct cam_vfe_bus_irq_evt_payload from the bus IRQ
+ *
+ * Returns 0 on the normal path, or the result of the error recovery
+ * bookkeeping when a composite error was detected.
+ */
+static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
+	void                              *handler_priv,
+	void                              *payload)
+
+{
+	int32_t                              buf_done_status = 0;
+	int32_t                              i;
+	int32_t                              rc = 0;
+	cam_hw_event_cb_func                 ife_hwr_irq_wm_done_cb;
+	struct cam_isp_resource_node        *hw_res_l = NULL;
+	struct cam_ife_hw_mgr_ctx           *ife_hwr_mgr_ctx = NULL;
+	struct cam_vfe_bus_irq_evt_payload  *evt_payload = payload;
+	struct cam_ife_hw_mgr_res           *isp_ife_out_res = NULL;
+	struct cam_hw_event_recovery_data    recovery_data;
+	struct cam_isp_hw_done_event_data    buf_done_event_data = {0};
+	struct cam_isp_hw_error_event_data   error_event_data = {0};
+	uint32_t  error_resc_handle[CAM_IFE_HW_OUT_RES_MAX];
+	uint32_t  num_of_error_handles = 0;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hwr_mgr_ctx = evt_payload->ctx;
+	ife_hwr_irq_wm_done_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
+
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+		isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
+
+		if (isp_ife_out_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		hw_res_l = isp_ife_out_res->hw_res[0];
+
+		/*
+		 * DUAL VFE: Index 0 is always a master. In case of composite
+		 * Error, if the error is not in master, it needs to be checked
+		 * in slave (for debuging purpose only) For other cases:
+		 * Index zero is valid
+		 */
+
+		if (hw_res_l && (evt_payload->core_index ==
+			hw_res_l->hw_intf->hw_idx))
+			buf_done_status = hw_res_l->bottom_half_handler(
+				hw_res_l, evt_payload);
+		else
+			continue;
+
+		switch (buf_done_status) {
+		case CAM_VFE_IRQ_STATUS_ERR_COMP:
+			/*
+			 * Write interface can pipeline upto 2 buffer done
+			 * strobes from each write client. If any of the client
+			 * triggers a third buffer done strobe before a
+			 * composite interrupt based on the first buffer doneis
+			 * triggered an error irq is set. This scenario can
+			 * only happen if a client is 3 frames ahead of the
+			 * other clients enabled in the same composite mask.
+			 */
+		case CAM_VFE_IRQ_STATUS_COMP_OWRT:
+			/*
+			 * It is an indication that bandwidth is not sufficient
+			 * to generate composite done irq within the VBI time.
+			 */
+
+			error_resc_handle[num_of_error_handles++] =
+				isp_ife_out_res->res_id;
+
+			/*
+			 * NOTE(review): this condition is always true right
+			 * after the increment above; presumably kept for a
+			 * future threshold — verify intent.
+			 */
+			if (num_of_error_handles > 0) {
+				error_event_data.error_type =
+					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
+				goto err;
+			}
+
+			break;
+		case CAM_VFE_IRQ_STATUS_ERR:
+			break;
+		case CAM_VFE_IRQ_STATUS_SUCCESS:
+			buf_done_event_data.num_handles = 1;
+			buf_done_event_data.resource_handle[0] =
+				isp_ife_out_res->res_id;
+
+			/* pending overflow: do not notify this resource */
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+			/* Report for Successful buf_done event if any */
+			if (buf_done_event_data.num_handles > 0 &&
+				ife_hwr_irq_wm_done_cb) {
+				CAM_DBG(CAM_ISP, "notify isp context");
+				ife_hwr_irq_wm_done_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_DONE,
+					&buf_done_event_data);
+			}
+
+			break;
+		default:
+			/* Do NOTHING */
+			error_resc_handle[num_of_error_handles++] =
+				isp_ife_out_res->res_id;
+			if (num_of_error_handles > 0) {
+				error_event_data.error_type =
+					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
+				goto err;
+			}
+			break;
+		}
+		if (!buf_done_status)
+			CAM_DBG(CAM_ISP,
+				"buf_done status:(%d),out_res->res_id: 0x%x",
+				buf_done_status, isp_ife_out_res->res_id);
+	}
+
+	return rc;
+
+err:
+	/*
+	 * Report for error if any.
+	 * For the first phase, Error is reported as overflow, for all
+	 * the affected context and any successful buf_done event is not
+	 * reported.
+	 */
+	rc = cam_ife_hw_mgr_find_affected_ctx(ife_hwr_mgr_ctx,
+		&error_event_data, evt_payload->core_index,
+		&recovery_data);
+
+	/*
+	 * We can temporarily return from here as
+	 * for the first phase, we are going to reset entire HW.
+	 */
+
+	CAM_DBG(CAM_ISP, "Exit buf_done_status Error = %d",
+		buf_done_status);
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_do_tasklet_buf_done - tasklet entry point for bus (WM done)
+ * IRQ payloads.
+ *
+ * Logs the raw bus IRQ status registers and forwards the payload to
+ * cam_ife_hw_mgr_handle_buf_done_for_hw_res().
+ *
+ * Fix: the original dereferenced evt_payload->ctx without validating
+ * evt_payload_priv; a NULL payload would crash before the handler's own
+ * check could run.  The sibling cam_ife_mgr_do_tasklet() already guards
+ * its payload the same way.
+ *
+ * @handler_priv:     struct cam_ife_hw_mgr_ctx (tasklet data)
+ * @evt_payload_priv: struct cam_vfe_bus_irq_evt_payload
+ */
+int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv,
+	void *evt_payload_priv)
+{
+	struct cam_ife_hw_mgr_ctx        *ife_hwr_mgr_ctx = handler_priv;
+	struct cam_vfe_bus_irq_evt_payload   *evt_payload;
+	int rc = -EINVAL;
+
+	if (!handler_priv || !evt_payload_priv)
+		return rc;
+
+	evt_payload = evt_payload_priv;
+	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
+
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core index:0x%x",
+		evt_payload, evt_payload->core_index);
+	CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+	CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+	CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
+	CAM_DBG(CAM_ISP, "bus_irq_comp_err: = %x", evt_payload->irq_reg_val[3]);
+	CAM_DBG(CAM_ISP, "bus_irq_comp_owrt: = %x",
+		evt_payload->irq_reg_val[4]);
+	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_err: = %x",
+		evt_payload->irq_reg_val[5]);
+	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
+		evt_payload->irq_reg_val[6]);
+	/* WM Done */
+	return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+}
+
+/*
+ * cam_ife_mgr_do_tasklet - main tasklet entry point for VFE top (mux) IRQ
+ * payloads.
+ *
+ * Dispatch order matters: error/overflow is checked first and, when found,
+ * all remaining events for this payload are skipped; otherwise EOF, SOF,
+ * reg-update and EPOCH handlers run in sequence.  The payload is always
+ * returned to the VFE payload pool before exit.
+ *
+ * @handler_priv:     struct cam_ife_hw_mgr_ctx (tasklet data)
+ * @evt_payload_priv: struct cam_vfe_top_irq_evt_payload
+ *
+ * Returns IRQ_HANDLED, or -EINVAL when either argument is missing.
+ */
+int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
+{
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx = handler_priv;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	int rc = -EINVAL;
+
+	if (!evt_payload_priv)
+		return rc;
+
+	evt_payload = evt_payload_priv;
+	if (!handler_priv)
+		return rc;
+
+	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
+
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
+		(void *)evt_payload,
+		evt_payload->core_index);
+	CAM_DBG(CAM_ISP, "irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+	CAM_DBG(CAM_ISP, "irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+	CAM_DBG(CAM_ISP, "Violation register: = %x",
+		evt_payload->irq_reg_val[2]);
+
+	/*
+	 * If overflow/overwrite/error/violation are pending
+	 * for this context it needs to be handled remaining
+	 * interrupts are ignored.
+	 */
+	rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Encountered Error (%d), ignoring other irqs",
+			rc);
+		goto put_payload;
+	}
+
+	CAM_DBG(CAM_ISP, "Calling EOF");
+	cam_ife_hw_mgr_handle_eof_for_camif_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	CAM_DBG(CAM_ISP, "Calling SOF");
+	/* SOF IRQ */
+	cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	CAM_DBG(CAM_ISP, "Calling RUP");
+	/* REG UPDATE */
+	cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	CAM_DBG(CAM_ISP, "Calling EPOCH");
+	/* EPOCH IRQ */
+	cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+put_payload:
+	/* always recycle the payload back to the VFE pool */
+	cam_vfe_put_evt_payload(evt_payload->core_info, &evt_payload);
+	return IRQ_HANDLED;
+}
+
+/*
+ * cam_ife_hw_mgr_sort_dev_with_caps - query and cache per-core capabilities
+ * for every registered CSID and IFE device.  Cores without a registered
+ * interface or without a get_hw_caps op are skipped.  Always returns 0.
+ */
+static int cam_ife_hw_mgr_sort_dev_with_caps(
+	struct cam_ife_hw_mgr *ife_hw_mgr)
+{
+	struct cam_hw_intf *hw_intf;
+	int i;
+
+	/* query capability from each CSID core */
+	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		hw_intf = ife_hw_mgr->csid_devices[i];
+		if (!hw_intf || !hw_intf->hw_ops.get_hw_caps)
+			continue;
+
+		hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
+			&ife_hw_mgr->ife_csid_dev_caps[i],
+			sizeof(ife_hw_mgr->ife_csid_dev_caps[i]));
+	}
+
+	/* query capability from each IFE core */
+	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		hw_intf = ife_hw_mgr->ife_devices[i];
+		if (!hw_intf || !hw_intf->hw_ops.get_hw_caps)
+			continue;
+
+		hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
+			&ife_hw_mgr->ife_dev_caps[i],
+			sizeof(ife_hw_mgr->ife_dev_caps[i]));
+	}
+
+	return 0;
+}
+
+/* debugfs setter: store the CSID debug bitmask for all CSID cores */
+static int cam_ife_set_csid_debug(void *data, u64 val)
+{
+	struct cam_ife_hw_mgr_debug *debug_cfg = &g_ife_hw_mgr.debug_cfg;
+
+	debug_cfg->csid_debug = val;
+	CAM_DBG(CAM_ISP, "Set CSID Debug value :%lld", val);
+	return 0;
+}
+
+/* debugfs getter: report the currently configured CSID debug bitmask */
+static int cam_ife_get_csid_debug(void *data, u64 *val)
+{
+	struct cam_ife_hw_mgr_debug *debug_cfg = &g_ife_hw_mgr.debug_cfg;
+
+	*val = debug_cfg->csid_debug;
+	CAM_DBG(CAM_ISP, "Get CSID Debug value :%lld",
+		g_ife_hw_mgr.debug_cfg.csid_debug);
+
+	return 0;
+}
+
+/* debugfs fops for the "ife_csid_debug" file (u64 read/write) */
+DEFINE_SIMPLE_ATTRIBUTE(cam_ife_csid_debug,
+	cam_ife_get_csid_debug,
+	cam_ife_set_csid_debug, "%16llu");
+
+/* debugfs setter: enable/disable CAMIF sensor diagnostic status */
+static int cam_ife_set_camif_debug(void *data, u64 val)
+{
+	struct cam_ife_hw_mgr_debug *debug_cfg = &g_ife_hw_mgr.debug_cfg;
+
+	debug_cfg->camif_debug = val;
+	CAM_DBG(CAM_ISP,
+		"Set camif enable_diag_sensor_status value :%lld", val);
+	return 0;
+}
+
+/*
+ * debugfs getter: report the CAMIF diag-sensor-status setting.
+ *
+ * Fix: the debug message said "Set" in a getter and logged
+ * debug_cfg.csid_debug instead of the camif_debug value it returns
+ * (copy/paste from the csid getter).
+ */
+static int cam_ife_get_camif_debug(void *data, u64 *val)
+{
+	*val = g_ife_hw_mgr.debug_cfg.camif_debug;
+	CAM_DBG(CAM_ISP,
+		"Get camif enable_diag_sensor_status value :%lld",
+		g_ife_hw_mgr.debug_cfg.camif_debug);
+
+	return 0;
+}
+
+/* debugfs fops for the "ife_camif_debug" file (u64 read/write) */
+DEFINE_SIMPLE_ATTRIBUTE(cam_ife_camif_debug,
+	cam_ife_get_camif_debug,
+	cam_ife_set_camif_debug, "%16llu");
+
+/*
+ * cam_ife_hw_mgr_debug_register - create the "camera_ife" debugfs tree.
+ *
+ * Exposes ife_csid_debug, enable_recovery and ife_camif_debug.  On any
+ * creation failure the whole directory is removed and -ENOMEM is returned;
+ * on success recovery starts disabled.
+ */
+static int cam_ife_hw_mgr_debug_register(void)
+{
+	g_ife_hw_mgr.debug_cfg.dentry = debugfs_create_dir("camera_ife",
+		NULL);
+
+	if (!g_ife_hw_mgr.debug_cfg.dentry) {
+		CAM_ERR(CAM_ISP, "failed to create dentry");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_file("ife_csid_debug",
+		0644,
+		g_ife_hw_mgr.debug_cfg.dentry, NULL,
+		&cam_ife_csid_debug)) {
+		CAM_ERR(CAM_ISP, "failed to create cam_ife_csid_debug");
+		goto err;
+	}
+
+	if (!debugfs_create_u32("enable_recovery",
+		0644,
+		g_ife_hw_mgr.debug_cfg.dentry,
+		&g_ife_hw_mgr.debug_cfg.enable_recovery)) {
+		CAM_ERR(CAM_ISP, "failed to create enable_recovery");
+		goto err;
+	}
+
+	if (!debugfs_create_file("ife_camif_debug",
+		0644,
+		g_ife_hw_mgr.debug_cfg.dentry, NULL,
+		&cam_ife_camif_debug)) {
+		CAM_ERR(CAM_ISP, "failed to create cam_ife_camif_debug");
+		goto err;
+	}
+	/* recovery is opt-in; default off */
+	g_ife_hw_mgr.debug_cfg.enable_recovery = 0;
+
+	return 0;
+
+err:
+	/* tear down the partially created tree */
+	debugfs_remove_recursive(g_ife_hw_mgr.debug_cfg.dentry);
+	return -ENOMEM;
+}
+
+/*
+ * cam_ife_hw_mgr_init - one-time initialization of the global IFE HW
+ * manager (g_ife_hw_mgr).
+ *
+ * Probes all VFE and CSID cores, acquires/attaches the image SMMU handles,
+ * picks up the CDM iommu handles, builds the context pool (resource lists,
+ * tasklets, CDM command buffers) and the worker queue, then fills in the
+ * hw_mgr_intf ops table for the ISP layer.
+ *
+ * Error unwinding: the "end" label frees all pool allocations and then
+ * falls through the smmu labels, so failures after SMMU setup release the
+ * handles in reverse acquisition order.
+ *
+ * @hw_mgr_intf: ops table to populate for the caller
+ * @iommu_hdl:   optional out-param for the non-secure image iommu handle
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
+{
+	int rc = -EFAULT;
+	int i, j;
+	struct cam_iommu_handle cdm_handles;
+	struct cam_ife_hw_mgr_ctx *ctx_pool;
+	struct cam_ife_hw_mgr_res *res_list_ife_out;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
+
+	mutex_init(&g_ife_hw_mgr.ctx_mutex);
+
+	/* the code below pairs CSID core i with IFE core i */
+	if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) {
+		CAM_ERR(CAM_ISP, "CSID num is different then IFE num");
+		return -EINVAL;
+	}
+
+	/* fill ife hw intf information; j counts cores that probed OK */
+	for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		rc = cam_vfe_hw_init(&g_ife_hw_mgr.ife_devices[i], i);
+		if (!rc) {
+			struct cam_hw_info *vfe_hw =
+				(struct cam_hw_info *)
+				g_ife_hw_mgr.ife_devices[i]->hw_priv;
+			struct cam_hw_soc_info *soc_info = &vfe_hw->soc_info;
+
+			j++;
+
+			g_ife_hw_mgr.cdm_reg_map[i] = &soc_info->reg_map[0];
+			CAM_DBG(CAM_ISP,
+				"reg_map: mem base = %pK cam_base = 0x%llx",
+				(void __iomem *)soc_info->reg_map[0].mem_base,
+				(uint64_t) soc_info->reg_map[0].mem_cam_base);
+		} else {
+			g_ife_hw_mgr.cdm_reg_map[i] = NULL;
+		}
+	}
+	if (j == 0) {
+		CAM_ERR(CAM_ISP, "no valid IFE HW");
+		return -EINVAL;
+	}
+
+	/* fill csid hw intf information */
+	for (i = 0, j = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		rc = cam_ife_csid_hw_init(&g_ife_hw_mgr.csid_devices[i], i);
+		if (!rc)
+			j++;
+	}
+	if (!j) {
+		CAM_ERR(CAM_ISP, "no valid IFE CSID HW");
+		return -EINVAL;
+	}
+
+	/* cache per-core capabilities */
+	cam_ife_hw_mgr_sort_dev_with_caps(&g_ife_hw_mgr);
+
+	/* setup ife context list */
+	INIT_LIST_HEAD(&g_ife_hw_mgr.free_ctx_list);
+	INIT_LIST_HEAD(&g_ife_hw_mgr.used_ctx_list);
+
+	/*
+	 * for now, we only support one iommu handle. later
+	 * we will need to setup more iommu handle for other
+	 * use cases.
+	 * Also, we have to release them once we have the
+	 * deinit support
+	 */
+	if (cam_smmu_get_handle("ife",
+		&g_ife_hw_mgr.mgr_common.img_iommu_hdl)) {
+		CAM_ERR(CAM_ISP, "Can not get iommu handle");
+		return -EINVAL;
+	}
+
+	if (cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		CAM_SMMU_ATTACH)) {
+		CAM_ERR(CAM_ISP, "Attach iommu handle failed.");
+		goto attach_fail;
+	}
+
+	if (cam_smmu_get_handle("cam-secure",
+		&g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure)) {
+		CAM_ERR(CAM_ISP, "Failed to get secure iommu handle");
+		goto secure_fail;
+	}
+
+	CAM_DBG(CAM_ISP, "iommu_handles: non-secure[0x%x], secure[0x%x]",
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure);
+
+	/* CDM iommu handles are optional; -1 marks them unavailable */
+	if (!cam_cdm_get_iommu_handle("ife", &cdm_handles)) {
+		CAM_DBG(CAM_ISP, "Successfully acquired the CDM iommu handles");
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = cdm_handles.non_secure;
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure =
+			cdm_handles.secure;
+	} else {
+		CAM_DBG(CAM_ISP, "Failed to acquire the CDM iommu handles");
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = -1;
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1;
+	}
+
+	atomic_set(&g_ife_hw_mgr.active_ctx_cnt, 0);
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		memset(&g_ife_hw_mgr.ctx_pool[i], 0,
+			sizeof(g_ife_hw_mgr.ctx_pool[i]));
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].list);
+
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_in.list);
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_cid);
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_csid);
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_src);
+		ctx_pool = &g_ife_hw_mgr.ctx_pool[i];
+		for (j = 0; j < CAM_IFE_HW_OUT_RES_MAX; j++) {
+			res_list_ife_out = &ctx_pool->res_list_ife_out[j];
+			INIT_LIST_HEAD(&res_list_ife_out->list);
+		}
+
+		/* init context pool */
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].free_res_list);
+		for (j = 0; j < CAM_IFE_HW_RES_POOL_MAX; j++) {
+			INIT_LIST_HEAD(
+				&g_ife_hw_mgr.ctx_pool[i].res_pool[j].list);
+			list_add_tail(
+				&g_ife_hw_mgr.ctx_pool[i].res_pool[j].list,
+				&g_ife_hw_mgr.ctx_pool[i].free_res_list);
+		}
+
+		/* one CDM BL request sized for CAM_IFE_HW_ENTRIES_MAX cmds */
+		g_ife_hw_mgr.ctx_pool[i].cdm_cmd =
+			kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+				((CAM_IFE_HW_ENTRIES_MAX - 1) *
+				 sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+		if (!g_ife_hw_mgr.ctx_pool[i].cdm_cmd) {
+			rc = -ENOMEM;
+			CAM_ERR(CAM_ISP, "Allocation Failed for cdm command");
+			goto end;
+		}
+
+		g_ife_hw_mgr.ctx_pool[i].ctx_index = i;
+		g_ife_hw_mgr.ctx_pool[i].hw_mgr = &g_ife_hw_mgr;
+
+		cam_tasklet_init(&g_ife_hw_mgr.mgr_common.tasklet_pool[i],
+			&g_ife_hw_mgr.ctx_pool[i], i);
+		g_ife_hw_mgr.ctx_pool[i].common.tasklet_info =
+			g_ife_hw_mgr.mgr_common.tasklet_pool[i];
+
+
+		init_completion(&g_ife_hw_mgr.ctx_pool[i].config_done_complete);
+		list_add_tail(&g_ife_hw_mgr.ctx_pool[i].list,
+			&g_ife_hw_mgr.free_ctx_list);
+	}
+
+	/* Create Worker for ife_hw_mgr with 10 tasks */
+	rc = cam_req_mgr_workq_create("cam_ife_worker", 10,
+			&g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ, 0);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Unable to create worker");
+		goto end;
+	}
+
+	/* fill return structure */
+	hw_mgr_intf->hw_mgr_priv = &g_ife_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_ife_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_ife_mgr_acquire;
+	hw_mgr_intf->hw_start = cam_ife_mgr_start_hw;
+	hw_mgr_intf->hw_stop = cam_ife_mgr_stop_hw;
+	hw_mgr_intf->hw_read = cam_ife_mgr_read;
+	hw_mgr_intf->hw_write = cam_ife_mgr_write;
+	hw_mgr_intf->hw_release = cam_ife_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
+	hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
+
+	if (iommu_hdl)
+		*iommu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl;
+
+	cam_ife_hw_mgr_debug_register();
+	CAM_DBG(CAM_ISP, "Exit");
+
+	return 0;
+end:
+	/* free everything allocated for the context pool so far */
+	if (rc) {
+		for (i = 0; i < CAM_CTX_MAX; i++) {
+			cam_tasklet_deinit(
+				&g_ife_hw_mgr.mgr_common.tasklet_pool[i]);
+			kfree(g_ife_hw_mgr.ctx_pool[i].cdm_cmd);
+			g_ife_hw_mgr.ctx_pool[i].cdm_cmd = NULL;
+			g_ife_hw_mgr.ctx_pool[i].common.tasklet_info = NULL;
+		}
+	}
+	/* fall through: release SMMU handles in reverse order */
+	cam_smmu_destroy_handle(
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure);
+	g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
+secure_fail:
+	cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		CAM_SMMU_DETACH);
+attach_fail:
+	cam_smmu_destroy_handle(g_ife_hw_mgr.mgr_common.img_iommu_hdl);
+	g_ife_hw_mgr.mgr_common.img_iommu_hdl = -1;
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
new file mode 100644
index 000000000000..cf1e425558f3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -0,0 +1,237 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_HW_MGR_H_
+#define _CAM_IFE_HW_MGR_H_
+
+#include <linux/completion.h>
+#include "cam_isp_hw_mgr.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_tasklet_util.h"
+
+/* enum cam_ife_hw_mgr_res_type - manager resource node type */
+enum cam_ife_hw_mgr_res_type {
+	CAM_IFE_HW_MGR_RES_UNINIT,   /* resource slot not in use */
+	CAM_IFE_HW_MGR_RES_ROOT,     /* root of the resource graph */
+	CAM_IFE_HW_MGR_RES_CID,      /* CSID CID resource */
+	CAM_IFE_HW_MGR_RES_CSID,     /* CSID path resource */
+	CAM_IFE_HW_MGR_RES_IFE_SRC,  /* IFE input (mux) resource */
+	CAM_IFE_HW_MGR_RES_IFE_OUT,  /* IFE output (write master) resource */
+};
+
+/* IFE resource constants */
+/* max IFE input ports: low byte of the UAPI input resource id space */
+#define CAM_IFE_HW_IN_RES_MAX            (CAM_ISP_IFE_IN_RES_MAX & 0xFF)
+/* max IFE output ports: low byte of the UAPI output resource id space */
+#define CAM_IFE_HW_OUT_RES_MAX           (CAM_ISP_IFE_OUT_RES_MAX & 0xFF)
+/* number of cam_ife_hw_mgr_res entries preallocated per context */
+#define CAM_IFE_HW_RES_POOL_MAX          64
+
+/**
+ * struct cam_ife_hw_mgr_res- HW resources for the VFE manager
+ *
+ * @list:                used by the resource list
+ * @res_type:            IFE manager resource type
+ * @res_id:              resource id based on the resource type for root or
+ *                       leaf resource, it matches the KMD interface port id.
+ *                       For branch resource, it is defined by the ISP HW
+ *                       layer
+ * @is_dual_vfe:         non-zero when this resource spans two VFE cores
+ * @hw_res:              hw layer resource array. For single VFE, only one VFE
+ *                       hw resource will be acquired. For dual VFE, two hw
+ *                       resources from different VFE HW device will be
+ *                       acquired
+ * @parent:              point to the parent resource node.
+ * @child:               point to the children resource nodes
+ * @num_children:        number of the child resource node.
+ * @is_secure:           informs whether the resource is in secure mode or not
+ *
+ */
+struct cam_ife_hw_mgr_res {
+	struct list_head                 list;
+	enum cam_ife_hw_mgr_res_type     res_type;
+	uint32_t                         res_id;
+	uint32_t                         is_dual_vfe;
+	struct cam_isp_resource_node    *hw_res[CAM_ISP_HW_SPLIT_MAX];
+
+	/* graph */
+	struct cam_ife_hw_mgr_res       *parent;
+	struct cam_ife_hw_mgr_res       *child[CAM_IFE_HW_OUT_RES_MAX];
+	uint32_t                         num_children;
+	uint32_t                         is_secure;
+};
+
+
+/**
+ * struct ctx_base_info - Base hardware information for the context
+ *
+ * @idx:       Base resource index (IFE core index owning the base)
+ * @split_id:  Split info for the base resource (left/right/max for dual VFE)
+ *
+ */
+struct ctx_base_info {
+	uint32_t                       idx;
+	enum cam_isp_hw_split_id       split_id;
+};
+
+/**
+ * struct cam_ife_hw_mgr_debug - contain the debug information
+ *
+ * @dentry:           Debugfs entry
+ * @csid_debug:       csid debug information
+ * @enable_recovery:  enable recovery
+ * @camif_debug:      camif debug info (enables sensor diagnosis status)
+ *
+ */
+struct cam_ife_hw_mgr_debug {
+	struct dentry  *dentry;
+	uint64_t       csid_debug;
+	uint32_t       enable_recovery;
+	uint32_t       camif_debug;
+};
+
+/**
+ * struct cam_ife_hw_mgr_ctx - IFE HW manager Context object
+ *
+ * @list:                   used by the ctx list.
+ * @common:                 common acquired context data
+ * @ctx_index:              acquired context id.
+ * @hw_mgr:                 IFE hw mgr which owns this context
+ * @ctx_in_use:             flag to tell whether context is active
+ * @res_list_ife_in:        Starting resource(TPG,PHY0, PHY1...) Can only be
+ *                          one.
+ * @res_list_ife_cid:       CID resource list
+ * @res_list_ife_csid:      CSID resource list
+ * @res_list_ife_src:       IFE input resource list
+ * @res_list_ife_out:       IFE output resources array
+ * @free_res_list:          Free resources list for the branch node
+ * @res_pool:               memory storage for the free resource list
+ * @irq_status0_mask:       irq_status0_mask for the context
+ * @irq_status1_mask:       irq_status1_mask for the context
+ * @base:                   device base index array contain the all IFE HW
+ *                          instance associated with this context.
+ * @num_base:               number of valid base data in the base array
+ * @cdm_handle:             cdm hw acquire handle
+ * @cdm_ops:                cdm util operation pointer for building
+ *                          cdm commands
+ * @cdm_cmd:                cdm base and length request pointer
+ * @sof_cnt:                sof count value per core, used for dual VFE
+ * @epoch_cnt:              epoch count value per core, used for dual VFE
+ * @eof_cnt:                eof count value per core, used for dual VFE
+ * @overflow_pending:       flag to specify the overflow is pending for the
+ *                          context
+ * @is_rdi_only_context:    flag to specify the context has only rdi resource
+ * @config_done_complete:   indicator for configuration complete
+ * @init_done:              indicate whether init hw is done
+ */
+struct cam_ife_hw_mgr_ctx {
+	struct list_head                list;
+	struct cam_isp_hw_mgr_ctx       common;
+
+	uint32_t                        ctx_index;
+	struct cam_ife_hw_mgr          *hw_mgr;
+	uint32_t                        ctx_in_use;
+
+	struct cam_ife_hw_mgr_res       res_list_ife_in;
+	struct list_head                res_list_ife_cid;
+	struct list_head                res_list_ife_csid;
+	struct list_head                res_list_ife_src;
+	struct cam_ife_hw_mgr_res       res_list_ife_out[
+						CAM_IFE_HW_OUT_RES_MAX];
+
+	struct list_head                free_res_list;
+	struct cam_ife_hw_mgr_res       res_pool[CAM_IFE_HW_RES_POOL_MAX];
+
+	uint32_t                        irq_status0_mask[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        irq_status1_mask[CAM_IFE_HW_NUM_MAX];
+	struct ctx_base_info            base[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        num_base;
+	uint32_t                        cdm_handle;
+	struct cam_cdm_utils_ops       *cdm_ops;
+	struct cam_cdm_bl_request      *cdm_cmd;
+
+	uint32_t                        sof_cnt[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        epoch_cnt[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        eof_cnt[CAM_IFE_HW_NUM_MAX];
+	atomic_t                        overflow_pending;
+	uint32_t                        is_rdi_only_context;
+	struct completion               config_done_complete;
+	bool                            init_done;
+};
+
+/**
+ * struct cam_ife_hw_mgr - IFE HW Manager
+ *
+ * @mgr_common:            common data for all HW managers
+ * @csid_devices:          csid device instances array. This will be filled by
+ *                         HW manager during the initialization.
+ * @ife_devices:           IFE device instances array. This will be filled by
+ *                         HW layer during initialization
+ * @cdm_reg_map:           per-core register map used for CDM programming
+ * @ctx_mutex:             mutex for the hw context pool
+ * @active_ctx_cnt:        number of contexts currently in use
+ * @free_ctx_list:         free hw context list
+ * @used_ctx_list:         used hw context list
+ * @ctx_pool:              context storage
+ * @ife_csid_dev_caps:     csid device capability stored per core
+ * @ife_dev_caps:          ife device capability per core
+ * @workq:                 work queue for IFE hw manager
+ * @debug_cfg:             debug configuration
+ */
+struct cam_ife_hw_mgr {
+	struct cam_isp_hw_mgr          mgr_common;
+	struct cam_hw_intf            *csid_devices[CAM_IFE_CSID_HW_NUM_MAX];
+	struct cam_hw_intf            *ife_devices[CAM_IFE_HW_NUM_MAX];
+	struct cam_soc_reg_map        *cdm_reg_map[CAM_IFE_HW_NUM_MAX];
+
+	struct mutex                   ctx_mutex;
+	atomic_t                       active_ctx_cnt;
+	struct list_head               free_ctx_list;
+	struct list_head               used_ctx_list;
+	struct cam_ife_hw_mgr_ctx      ctx_pool[CAM_CTX_MAX];
+
+	struct cam_ife_csid_hw_caps    ife_csid_dev_caps[
+						CAM_IFE_CSID_HW_NUM_MAX];
+	struct cam_vfe_hw_get_hw_cap   ife_dev_caps[CAM_IFE_HW_NUM_MAX];
+	struct cam_req_mgr_core_workq  *workq;
+	struct cam_ife_hw_mgr_debug    debug_cfg;
+};
+
+/**
+ * cam_ife_hw_mgr_init()
+ *
+ * @brief:              Initialize the IFE hardware manager. This is the
+ *                      entry function for the IFE HW manager.
+ *
+ * @hw_mgr_intf:        IFE hardware manager object returned
+ * @iommu_hdl:          Iommu handle to be returned
+ *
+ */
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl);
+
+/**
+ * cam_ife_mgr_do_tasklet_buf_done()
+ *
+ * @brief:              Main tasklet handle function for the buf done event
+ *
+ * @handler_priv:       Tasklet information handle
+ * @evt_payload_priv:   Event payload for the handler function
+ *
+ */
+int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv, void *evt_payload_priv);
+
+/**
+ * cam_ife_mgr_do_tasklet()
+ *
+ * @brief:              Main tasklet handle function for mux resource events
+ *
+ * @handler_priv:       Tasklet information handle
+ * @evt_payload_priv:   Event payload for the handler function
+ *
+ */
+int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv);
+
+#endif /* _CAM_IFE_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
new file mode 100644
index 000000000000..ef0ec31c328e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_debug_util.h"
+
+
+/*
+ * cam_isp_hw_mgr_init - dispatch ISP HW manager initialization based on the
+ * devicetree "arch-compat" string ("ife" selects the IFE HW manager).
+ *
+ * Fix: the return value of of_property_read_string_index() was ignored;
+ * when the property is missing, compat_str stays NULL and the subsequent
+ * strnstr()/strlen() calls dereference NULL.
+ *
+ * @of_node:   devicetree node carrying "arch-compat"
+ * @hw_mgr:    ops table to populate
+ * @iommu_hdl: optional out-param for the image iommu handle
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int cam_isp_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl)
+{
+	int rc = 0;
+	const char *compat_str = NULL;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&compat_str);
+	if (rc || !compat_str) {
+		CAM_ERR(CAM_ISP, "Failed to read arch-compat rc=%d", rc);
+		return -EINVAL;
+	}
+
+	if (strnstr(compat_str, "ife", strlen(compat_str)))
+		rc = cam_ife_hw_mgr_init(hw_mgr, iommu_hdl);
+	else {
+		CAM_ERR(CAM_ISP, "Invalid ISP hw type");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h
new file mode 100644
index 000000000000..af224ad5aa4d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_HW_MGR_H_
+#define _CAM_ISP_HW_MGR_H_
+
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_tasklet_util.h"
+
+#define CAM_ISP_HW_NUM_MAX 4
+
+/**
+ * struct cam_isp_hw_mgr_ctx - common acquired context for managers
+ *
+ * @tasklet_info:       associated tasklet
+ * @event_cb:           call back interface to ISP context. Set during
+ *                      acquire device
+ * @cb_priv:            first argument for the call back function
+ *                      set during acquire device
+ *
+ */
+struct cam_isp_hw_mgr_ctx {
+	void                           *tasklet_info;
+	cam_hw_event_cb_func            event_cb[CAM_ISP_HW_EVENT_MAX];
+	void                           *cb_priv;
+};
+
+/**
+ * struct cam_isp_hw_mgr - ISP HW Manager common object
+ *
+ * @tasklet_pool:             Tasklet pool
+ * @img_iommu_hdl:            iommu memory handle for regular image buffer
+ * @img_iommu_hdl_secure:     iommu memory handle for secure image buffer
+ * @cmd_iommu_hdl:            iommu memory handle for regular command buffer
+ * @cmd_iommu_hdl_secure:     iommu memory handle for secure command buffer
+ * @scratch_buf_range:        scratch buffer range (not for IFE)
+ * @scratch_buf_addr:         scratch buffer address (not for IFE)
+ *
+ */
+struct cam_isp_hw_mgr {
+	void                           *tasklet_pool[CAM_CTX_MAX];
+	int                             img_iommu_hdl;
+	int                             img_iommu_hdl_secure;
+	int                             cmd_iommu_hdl;
+	int                             cmd_iommu_hdl_secure;
+	uint32_t                        scratch_buf_range;
+	dma_addr_t                      scratch_buf_addr;
+};
+
+/**
+ * struct cam_hw_event_recovery_data - Payload for the recovery procedure
+ *
+ * @error_type:         Error type that causes the recovery
+ * @affected_core:      Array of the hardware cores that are affected
+ * @affected_ctx:       Array of the hardware contexts that are affected
+ * @no_of_context:      Actual number of the affected contexts
+ *
+ */
+struct cam_hw_event_recovery_data {
+	uint32_t                   error_type;
+	uint32_t                   affected_core[CAM_ISP_HW_NUM_MAX];
+	struct cam_ife_hw_mgr_ctx *affected_ctx[CAM_CTX_MAX];
+	uint32_t                   no_of_context;
+};
+#endif /* _CAM_ISP_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/Makefile
new file mode 100644
index 000000000000..499df45da9c2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/Makefile
@@ -0,0 +1,12 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_tasklet_util.o cam_isp_packet_parser.o
+obj-$(CONFIG_SPECTRA_CAMERA) += irq_controller/
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
new file mode 100644
index 000000000000..cd1d024148ff
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -0,0 +1,787 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <uapi/media/cam_defs.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_mem_mgr.h"
+#include "cam_isp_hw.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_isp_packet_parser.h"
+#include "cam_debug_util.h"
+
+int cam_isp_add_change_base(
+ struct cam_hw_prepare_update_args *prepare,
+ struct list_head *res_list_isp_src,
+ uint32_t base_idx,
+ struct cam_kmd_buf_info *kmd_buf_info)
+{
+ int rc = -EINVAL;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_isp_resource_node *res;
+ struct cam_isp_hw_get_cmd_update get_base;
+ struct cam_hw_update_entry *hw_entry;
+ uint32_t num_ent, i;
+
+ hw_entry = prepare->hw_update_entries;
+ num_ent = prepare->num_hw_update_entries;
+
+ /* Max one hw entry required for each base */
+ if (num_ent + 1 >= prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ num_ent, prepare->max_hw_update_entries);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
+ if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+ continue;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ res = hw_mgr_res->hw_res[i];
+ if (res->hw_intf->hw_idx != base_idx)
+ continue;
+
+ get_base.res = res;
+ get_base.cmd_type = CAM_ISP_HW_CMD_GET_CHANGE_BASE;
+ get_base.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
+ kmd_buf_info->used_bytes/4;
+ get_base.cmd.size = kmd_buf_info->size -
+ kmd_buf_info->used_bytes;
+
+ rc = res->hw_intf->hw_ops.process_cmd(
+ res->hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_GET_CHANGE_BASE, &get_base,
+ sizeof(struct cam_isp_hw_get_cmd_update));
+ if (rc)
+ return rc;
+
+ hw_entry[num_ent].handle = kmd_buf_info->handle;
+ hw_entry[num_ent].len = get_base.cmd.used_bytes;
+ hw_entry[num_ent].offset = kmd_buf_info->offset;
+ CAM_DBG(CAM_ISP,
+ "num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ hw_entry[num_ent].handle,
+ hw_entry[num_ent].len,
+ hw_entry[num_ent].offset);
+
+ kmd_buf_info->used_bytes += get_base.cmd.used_bytes;
+ kmd_buf_info->offset += get_base.cmd.used_bytes;
+ num_ent++;
+ prepare->num_hw_update_entries = num_ent;
+
+ /* only one change-base entry per base; done */
+ return 0;
+ }
+ }
+
+ return rc;
+}
+
+static int cam_isp_update_dual_config(
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_cmd_buf_desc *cmd_desc,
+ uint32_t split_id,
+ uint32_t base_idx,
+ struct cam_ife_hw_mgr_res *res_list_isp_out,
+ uint32_t size_isp_out)
+{
+ int rc = -EINVAL;
+ struct cam_isp_dual_config *dual_config;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_isp_resource_node *res;
+ struct cam_isp_hw_dual_isp_update_args dual_isp_update_args;
+ uint32_t outport_id;
+ uint32_t ports_plane_idx;
+ size_t len = 0;
+ uint32_t *cpu_addr;
+ uint32_t i, j;
+
+ CAM_DBG(CAM_UTIL, "cmd des size %d, length: %d",
+ cmd_desc->size, cmd_desc->length);
+
+ rc = cam_packet_util_get_cmd_mem_addr(
+ cmd_desc->mem_handle, &cpu_addr, &len);
+ if (rc)
+ return rc;
+
+ cpu_addr += (cmd_desc->offset / 4);
+ dual_config = (struct cam_isp_dual_config *)cpu_addr;
+
+ for (i = 0; i < dual_config->num_ports; i++) {
+
+ /* bound by the actual res_list_isp_out array size */
+ if (i >= size_isp_out) {
+ CAM_ERR(CAM_UTIL,
+ "failed update for i:%d > size_isp_out:%d",
+ i, size_isp_out);
+ return -EINVAL;
+ }
+
+ hw_mgr_res = &res_list_isp_out[i];
+ for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+ if (!hw_mgr_res->hw_res[j])
+ continue;
+
+ if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+ continue;
+
+ res = hw_mgr_res->hw_res[j];
+
+ if (res->res_id < CAM_ISP_IFE_OUT_RES_BASE ||
+ res->res_id >= CAM_ISP_IFE_OUT_RES_MAX)
+ continue;
+
+ outport_id = res->res_id & 0xFF;
+
+ ports_plane_idx = (j * (dual_config->num_ports *
+ CAM_PACKET_MAX_PLANES)) +
+ (outport_id * CAM_PACKET_MAX_PLANES);
+
+ if (dual_config->stripes[ports_plane_idx].port_id == 0)
+ continue;
+
+ dual_isp_update_args.split_id = j;
+ dual_isp_update_args.res = res;
+ dual_isp_update_args.dual_cfg = dual_config;
+ rc = res->hw_intf->hw_ops.process_cmd(
+ res->hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_STRIPE_UPDATE,
+ &dual_isp_update_args,
+ sizeof(struct cam_isp_hw_dual_isp_update_args));
+ if (rc)
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+int cam_isp_add_cmd_buf_update(
+ struct cam_ife_hw_mgr_res *hw_mgr_res,
+ uint32_t cmd_type,
+ uint32_t hw_cmd_type,
+ uint32_t base_idx,
+ uint32_t *cmd_buf_addr,
+ uint32_t kmd_buf_remain_size,
+ void *cmd_update_data,
+ uint32_t *bytes_used)
+{
+ int rc = 0;
+ struct cam_isp_resource_node *res;
+ struct cam_isp_hw_get_cmd_update cmd_update;
+ uint32_t i;
+ uint32_t total_used_bytes = 0;
+
+ if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
+ CAM_ERR(CAM_ISP, "io res id:%d not valid",
+ hw_mgr_res->res_type);
+ return -EINVAL;
+ }
+
+ cmd_update.cmd_type = hw_cmd_type;
+ cmd_update.cmd.cmd_buf_addr = cmd_buf_addr;
+ cmd_update.cmd.size = kmd_buf_remain_size;
+ cmd_update.cmd.used_bytes = 0;
+ cmd_update.data = cmd_update_data;
+ CAM_DBG(CAM_ISP, "cmd_type %u cmd buffer 0x%pK, size %d",
+ cmd_update.cmd_type,
+ cmd_update.cmd.cmd_buf_addr,
+ cmd_update.cmd.size);
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ if (hw_mgr_res->hw_res[i]->hw_intf->hw_idx != base_idx)
+ continue;
+
+ res = hw_mgr_res->hw_res[i];
+ cmd_update.res = res;
+
+ rc = res->hw_intf->hw_ops.process_cmd(
+ res->hw_intf->hw_priv,
+ cmd_update.cmd_type, &cmd_update,
+ sizeof(struct cam_isp_hw_get_cmd_update));
+
+ if (rc) {
+ CAM_ERR(CAM_ISP, "get buf cmd error:%d",
+ res->res_id);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ total_used_bytes += cmd_update.cmd.used_bytes;
+ }
+ *bytes_used = total_used_bytes;
+ CAM_DBG(CAM_ISP, "total_used_bytes %u", total_used_bytes);
+ return rc;
+}
+
+int cam_isp_add_command_buffers(
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ struct ctx_base_info *base_info,
+ cam_packet_generic_blob_handler blob_handler_cb,
+ struct cam_ife_hw_mgr_res *res_list_isp_out,
+ uint32_t size_isp_out)
+{
+ int rc = 0;
+ uint32_t cmd_meta_data, num_ent, i;
+ uint32_t base_idx;
+ enum cam_isp_hw_split_id split_id;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ struct cam_hw_update_entry *hw_entry;
+
+ hw_entry = prepare->hw_update_entries;
+ split_id = base_info->split_id;
+ base_idx = base_info->idx;
+
+ /*
+ * set the cmd_desc to point the first command descriptor in the
+ * packet
+ */
+ cmd_desc = (struct cam_cmd_buf_desc *)
+ ((uint8_t *)&prepare->packet->payload +
+ prepare->packet->cmd_buf_offset);
+
+ CAM_DBG(CAM_ISP, "split id = %d, number of command buffers:%d",
+ split_id, prepare->packet->num_cmd_buf);
+
+ for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+ num_ent = prepare->num_hw_update_entries;
+ if (!cmd_desc[i].length)
+ continue;
+
+ /* One hw entry space required for left or right or common */
+ if (num_ent + 1 >= prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ num_ent, prepare->max_hw_update_entries);
+ return -EINVAL;
+ }
+
+ rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+ if (rc)
+ return rc;
+
+ cmd_meta_data = cmd_desc[i].meta_data;
+
+ CAM_DBG(CAM_ISP, "meta type: %d, split_id: %d",
+ cmd_meta_data, split_id);
+
+ switch (cmd_meta_data) {
+ case CAM_ISP_PACKET_META_BASE:
+ case CAM_ISP_PACKET_META_LEFT:
+ case CAM_ISP_PACKET_META_DMI_LEFT:
+ if (split_id == CAM_ISP_HW_SPLIT_LEFT) {
+ hw_entry[num_ent].len = cmd_desc[i].length;
+ hw_entry[num_ent].handle =
+ cmd_desc[i].mem_handle;
+ hw_entry[num_ent].offset = cmd_desc[i].offset;
+ CAM_DBG(CAM_ISP,
+ "Meta_Left num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ hw_entry[num_ent].handle,
+ hw_entry[num_ent].len,
+ hw_entry[num_ent].offset);
+
+ if (cmd_meta_data ==
+ CAM_ISP_PACKET_META_DMI_LEFT)
+ hw_entry[num_ent].flags = 0x1;
+
+ num_ent++;
+ }
+ break;
+ case CAM_ISP_PACKET_META_RIGHT:
+ case CAM_ISP_PACKET_META_DMI_RIGHT:
+ if (split_id == CAM_ISP_HW_SPLIT_RIGHT) {
+ hw_entry[num_ent].len = cmd_desc[i].length;
+ hw_entry[num_ent].handle =
+ cmd_desc[i].mem_handle;
+ hw_entry[num_ent].offset = cmd_desc[i].offset;
+ CAM_DBG(CAM_ISP,
+ "Meta_Right num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ hw_entry[num_ent].handle,
+ hw_entry[num_ent].len,
+ hw_entry[num_ent].offset);
+
+ if (cmd_meta_data ==
+ CAM_ISP_PACKET_META_DMI_RIGHT)
+ hw_entry[num_ent].flags = 0x1;
+ num_ent++;
+ }
+ break;
+ case CAM_ISP_PACKET_META_COMMON:
+ case CAM_ISP_PACKET_META_DMI_COMMON:
+ hw_entry[num_ent].len = cmd_desc[i].length;
+ hw_entry[num_ent].handle =
+ cmd_desc[i].mem_handle;
+ hw_entry[num_ent].offset = cmd_desc[i].offset;
+ CAM_DBG(CAM_ISP,
+ "Meta_Common num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ hw_entry[num_ent].handle,
+ hw_entry[num_ent].len,
+ hw_entry[num_ent].offset);
+ if (cmd_meta_data == CAM_ISP_PACKET_META_DMI_COMMON)
+ hw_entry[num_ent].flags = 0x1;
+
+ num_ent++;
+ break;
+ case CAM_ISP_PACKET_META_DUAL_CONFIG:
+ rc = cam_isp_update_dual_config(prepare,
+ &cmd_desc[i], split_id, base_idx,
+ res_list_isp_out, size_isp_out);
+
+ if (rc)
+ return rc;
+ break;
+ case CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT:
+ if (split_id == CAM_ISP_HW_SPLIT_LEFT) {
+ struct cam_isp_generic_blob_info blob_info;
+
+ prepare->num_hw_update_entries = num_ent;
+ blob_info.prepare = prepare;
+ blob_info.base_info = base_info;
+ blob_info.kmd_buf_info = kmd_buf_info;
+
+ rc = cam_packet_util_process_generic_cmd_buffer(
+ &cmd_desc[i],
+ blob_handler_cb,
+ &blob_info);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Failed in processing blobs %d",
+ rc);
+ return rc;
+ }
+ num_ent = prepare->num_hw_update_entries;
+ }
+ break;
+ case CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT:
+ if (split_id == CAM_ISP_HW_SPLIT_RIGHT) {
+ struct cam_isp_generic_blob_info blob_info;
+
+ prepare->num_hw_update_entries = num_ent;
+ blob_info.prepare = prepare;
+ blob_info.base_info = base_info;
+ blob_info.kmd_buf_info = kmd_buf_info;
+
+ rc = cam_packet_util_process_generic_cmd_buffer(
+ &cmd_desc[i],
+ blob_handler_cb,
+ &blob_info);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Failed in processing blobs %d",
+ rc);
+ return rc;
+ }
+ num_ent = prepare->num_hw_update_entries;
+ }
+ break;
+ case CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON: {
+ struct cam_isp_generic_blob_info blob_info;
+
+ prepare->num_hw_update_entries = num_ent;
+ blob_info.prepare = prepare;
+ blob_info.base_info = base_info;
+ blob_info.kmd_buf_info = kmd_buf_info;
+
+ rc = cam_packet_util_process_generic_cmd_buffer(
+ &cmd_desc[i],
+ blob_handler_cb,
+ &blob_info);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Failed in processing blobs %d", rc);
+ return rc;
+ }
+ num_ent = prepare->num_hw_update_entries;
+ }
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "invalid cdm command meta data %d",
+ cmd_meta_data);
+ return -EINVAL;
+ }
+ prepare->num_hw_update_entries = num_ent;
+ }
+
+ return rc;
+}
+
+int cam_isp_add_io_buffers(
+ int iommu_hdl,
+ int sec_iommu_hdl,
+ struct cam_hw_prepare_update_args *prepare,
+ uint32_t base_idx,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ struct cam_ife_hw_mgr_res *res_list_isp_out,
+ uint32_t size_isp_out,
+ bool fill_fence)
+{
+ int rc = 0;
+ uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+ struct cam_buf_io_cfg *io_cfg;
+ struct cam_isp_resource_node *res;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_isp_hw_get_cmd_update update_buf;
+ struct cam_isp_hw_get_wm_update wm_update;
+ struct cam_hw_fence_map_entry *out_map_entries;
+ struct cam_hw_fence_map_entry *in_map_entries;
+ uint32_t kmd_buf_remain_size;
+ uint32_t i, j, num_out_buf, num_in_buf;
+ uint32_t res_id_out, res_id_in, plane_id;
+ uint32_t io_cfg_used_bytes, num_ent;
+ size_t size;
+ int32_t hdl;
+ int mmu_hdl;
+ bool mode, is_buf_secure;
+
+ io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+ &prepare->packet->payload +
+ prepare->packet->io_configs_offset);
+ num_out_buf = 0;
+ num_in_buf = 0;
+ io_cfg_used_bytes = 0;
+ prepare->pf_data->packet = prepare->packet;
+
+ /* Max one hw entries required for each base */
+ if (prepare->num_hw_update_entries + 1 >=
+ prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ prepare->num_hw_update_entries,
+ prepare->max_hw_update_entries);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < prepare->packet->num_io_configs; i++) {
+ CAM_DBG(CAM_ISP, "======= io config idx %d ============", i);
+ CAM_DBG(CAM_REQ,
+ "i %d req_id %llu resource_type:%d fence:%d direction %d",
+ i, prepare->packet->header.request_id,
+ io_cfg[i].resource_type, io_cfg[i].fence,
+ io_cfg[i].direction);
+ CAM_DBG(CAM_ISP, "format: %d", io_cfg[i].format);
+
+ if (io_cfg[i].direction == CAM_BUF_OUTPUT) {
+ res_id_out = io_cfg[i].resource_type & 0xFF;
+ if (res_id_out >= size_isp_out) {
+ CAM_ERR(CAM_ISP, "invalid out restype:%x",
+ io_cfg[i].resource_type);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP,
+ "configure output io with fill fence %d",
+ fill_fence);
+ out_map_entries =
+ &prepare->out_map_entries[num_out_buf];
+ if (fill_fence) {
+ if (num_out_buf <
+ prepare->max_out_map_entries) {
+ out_map_entries->resource_handle =
+ io_cfg[i].resource_type;
+ out_map_entries->sync_id =
+ io_cfg[i].fence;
+ num_out_buf++;
+ } else {
+ CAM_ERR(CAM_ISP, "ln_out:%d max_ln:%d",
+ num_out_buf,
+ prepare->max_out_map_entries);
+ return -EINVAL;
+ }
+ }
+
+ hw_mgr_res = &res_list_isp_out[res_id_out];
+ if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
+ CAM_ERR(CAM_ISP, "io res id:%d not valid",
+ io_cfg[i].resource_type);
+ return -EINVAL;
+ }
+ } else if (io_cfg[i].direction == CAM_BUF_INPUT) {
+ res_id_in = io_cfg[i].resource_type & 0xFF;
+ CAM_DBG(CAM_ISP,
+ "configure input io with fill fence %d",
+ fill_fence);
+ in_map_entries =
+ &prepare->in_map_entries[num_in_buf];
+ if (fill_fence) {
+ if (num_in_buf < prepare->max_in_map_entries) {
+ in_map_entries->resource_handle =
+ io_cfg[i].resource_type;
+ in_map_entries->sync_id =
+ io_cfg[i].fence;
+ num_in_buf++;
+ } else {
+ CAM_ERR(CAM_ISP, "ln_in:%d imax_ln:%d",
+ num_in_buf,
+ prepare->max_in_map_entries);
+ return -EINVAL;
+ }
+ }
+ continue;
+ } else {
+ CAM_ERR(CAM_ISP, "Invalid io config direction :%d",
+ io_cfg[i].direction);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "setup mem io");
+ for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+ if (!hw_mgr_res->hw_res[j])
+ continue;
+
+ if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+ continue;
+
+ res = hw_mgr_res->hw_res[j];
+ if (res->res_id != io_cfg[i].resource_type) {
+ CAM_ERR(CAM_ISP,
+ "wm err res id:%d io res id:%d",
+ res->res_id, io_cfg[i].resource_type);
+ return -EINVAL;
+ }
+
+ memset(io_addr, 0, sizeof(io_addr));
+
+ for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
+ plane_id++) {
+ if (!io_cfg[i].mem_handle[plane_id])
+ break;
+
+ hdl = io_cfg[i].mem_handle[plane_id];
+ if (res->process_cmd(res,
+ CAM_ISP_HW_CMD_GET_SECURE_MODE,
+ &mode,
+ sizeof(bool)))
+ return -EINVAL;
+
+ is_buf_secure = cam_mem_is_secure_buf(hdl);
+ if ((mode == CAM_SECURE_MODE_SECURE) &&
+ is_buf_secure) {
+ mmu_hdl = sec_iommu_hdl;
+ } else if (
+ (mode == CAM_SECURE_MODE_NON_SECURE) &&
+ (!is_buf_secure)) {
+ mmu_hdl = iommu_hdl;
+ } else {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "Invalid hdl: port mode[%u], buf mode[%u]",
+ mode, is_buf_secure);
+ return -EINVAL;
+ }
+
+ rc = cam_mem_get_io_buf(
+ io_cfg[i].mem_handle[plane_id],
+ mmu_hdl, &io_addr[plane_id], &size);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "no io addr for plane%d",
+ plane_id);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ /* need to update with offset */
+ io_addr[plane_id] +=
+ io_cfg[i].offsets[plane_id];
+ CAM_DBG(CAM_ISP,
+ "get io_addr for plane %d: 0x%llx, mem_hdl=0x%x",
+ plane_id, io_addr[plane_id],
+ io_cfg[i].mem_handle[plane_id]);
+
+ CAM_DBG(CAM_ISP,
+ "mmu_hdl=0x%x, size=%d, end=0x%x",
+ mmu_hdl, (int)size,
+ io_addr[plane_id]+size);
+
+ }
+ if (!plane_id) {
+ CAM_ERR(CAM_ISP, "No valid planes for res%d",
+ res->res_id);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ if ((kmd_buf_info->used_bytes + io_cfg_used_bytes) <
+ kmd_buf_info->size) {
+ kmd_buf_remain_size = kmd_buf_info->size -
+ (kmd_buf_info->used_bytes +
+ io_cfg_used_bytes);
+ } else {
+ CAM_ERR(CAM_ISP,
+ "no free kmd memory for base %d",
+ base_idx);
+ rc = -ENOMEM;
+ return rc;
+ }
+ update_buf.res = res;
+ update_buf.cmd_type = CAM_ISP_HW_CMD_GET_BUF_UPDATE;
+ update_buf.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
+ kmd_buf_info->used_bytes/4 +
+ io_cfg_used_bytes/4;
+ wm_update.image_buf = io_addr;
+ wm_update.num_buf = plane_id;
+ wm_update.io_cfg = &io_cfg[i];
+ update_buf.cmd.size = kmd_buf_remain_size;
+ update_buf.wm_update = &wm_update;
+
+ CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
+ update_buf.cmd.cmd_buf_addr,
+ update_buf.cmd.size);
+ rc = res->hw_intf->hw_ops.process_cmd(
+ res->hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_GET_BUF_UPDATE, &update_buf,
+ sizeof(struct cam_isp_hw_get_cmd_update));
+
+ if (rc) {
+ CAM_ERR(CAM_ISP, "get buf cmd error:%d",
+ res->res_id);
+ rc = -ENOMEM;
+ return rc;
+ }
+ io_cfg_used_bytes += update_buf.cmd.used_bytes;
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "io_cfg_used_bytes %d, fill_fence %d",
+ io_cfg_used_bytes, fill_fence);
+ if (io_cfg_used_bytes) {
+ /* Update the HW entries */
+ num_ent = prepare->num_hw_update_entries;
+ prepare->hw_update_entries[num_ent].handle =
+ kmd_buf_info->handle;
+ prepare->hw_update_entries[num_ent].len = io_cfg_used_bytes;
+ prepare->hw_update_entries[num_ent].offset =
+ kmd_buf_info->offset;
+ CAM_DBG(CAM_ISP,
+ "num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ prepare->hw_update_entries[num_ent].handle,
+ prepare->hw_update_entries[num_ent].len,
+ prepare->hw_update_entries[num_ent].offset);
+ num_ent++;
+
+ kmd_buf_info->used_bytes += io_cfg_used_bytes;
+ kmd_buf_info->offset += io_cfg_used_bytes;
+ prepare->num_hw_update_entries = num_ent;
+ }
+
+ if (fill_fence) {
+ prepare->num_out_map_entries = num_out_buf;
+ prepare->num_in_map_entries = num_in_buf;
+ }
+
+ return rc;
+}
+
+
+int cam_isp_add_reg_update(
+ struct cam_hw_prepare_update_args *prepare,
+ struct list_head *res_list_isp_src,
+ uint32_t base_idx,
+ struct cam_kmd_buf_info *kmd_buf_info)
+{
+ int rc = -EINVAL;
+ struct cam_isp_resource_node *res;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_update_entry *hw_entry;
+ struct cam_isp_hw_get_cmd_update get_regup;
+ uint32_t kmd_buf_remain_size, num_ent, i, reg_update_size;
+
+ hw_entry = prepare->hw_update_entries;
+ /* Max one hw entry required for each base */
+ if (prepare->num_hw_update_entries + 1 >=
+ prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ prepare->num_hw_update_entries,
+ prepare->max_hw_update_entries);
+ return -EINVAL;
+ }
+
+ reg_update_size = 0;
+ list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
+ if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+ continue;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ res = hw_mgr_res->hw_res[i];
+ if (res->hw_intf->hw_idx != base_idx)
+ continue;
+
+ if (kmd_buf_info->size > (kmd_buf_info->used_bytes +
+ reg_update_size)) {
+ kmd_buf_remain_size = kmd_buf_info->size -
+ (kmd_buf_info->used_bytes +
+ reg_update_size);
+ } else {
+ CAM_ERR(CAM_ISP, "no free mem %d %d %d",
+ base_idx, kmd_buf_info->size,
+ kmd_buf_info->used_bytes +
+ reg_update_size);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ get_regup.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
+ kmd_buf_info->used_bytes/4 +
+ reg_update_size/4;
+ get_regup.cmd.size = kmd_buf_remain_size;
+ get_regup.cmd_type = CAM_ISP_HW_CMD_GET_REG_UPDATE;
+ get_regup.res = res;
+
+ rc = res->hw_intf->hw_ops.process_cmd(
+ res->hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_GET_REG_UPDATE, &get_regup,
+ sizeof(struct cam_isp_hw_get_cmd_update));
+ if (rc)
+ return rc;
+
+ CAM_DBG(CAM_ISP, "Reg update added for res %d hw_id %d",
+ res->res_id, res->hw_intf->hw_idx);
+ reg_update_size += get_regup.cmd.used_bytes;
+ }
+ }
+
+ if (reg_update_size) {
+ /* Update the HW entries */
+ num_ent = prepare->num_hw_update_entries;
+ prepare->hw_update_entries[num_ent].handle =
+ kmd_buf_info->handle;
+ prepare->hw_update_entries[num_ent].len = reg_update_size;
+ prepare->hw_update_entries[num_ent].offset =
+ kmd_buf_info->offset;
+ CAM_DBG(CAM_ISP,
+ "num_ent=%d handle=0x%x, len=%u, offset=%u",
+ num_ent,
+ prepare->hw_update_entries[num_ent].handle,
+ prepare->hw_update_entries[num_ent].len,
+ prepare->hw_update_entries[num_ent].offset);
+ num_ent++;
+
+ kmd_buf_info->used_bytes += reg_update_size;
+ kmd_buf_info->offset += reg_update_size;
+ prepare->num_hw_update_entries = num_ent;
+ /* reg update is success return status 0 */
+ rc = 0;
+ }
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
new file mode 100644
index 000000000000..261e191b66f9
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -0,0 +1,331 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/ratelimit.h>
+#include "cam_tasklet_util.h"
+#include "cam_irq_controller.h"
+#include "cam_debug_util.h"
+
+#define CAM_TASKLETQ_SIZE 256
+
+static void cam_tasklet_action(unsigned long data);
+
+/**
+ * struct cam_tasklet_queue_cmd:
+ * @brief: Structure associated with each slot in the
+ * tasklet queue
+ *
+ * @list: list_head member for each entry in queue
+ * @payload: Payload structure for the event. This will be
+ * passed to the handler function
+ * @bottom_half_handler: Function pointer for event handler in bottom
+ * half context
+ *
+ */
+struct cam_tasklet_queue_cmd {
+ struct list_head list;
+ void *payload;
+ CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler;
+};
+
+/**
+ * struct cam_tasklet_info:
+ * @brief: Tasklet private structure
+ *
+ * @list: list_head member for each tasklet
+ * @index: Instance id for the tasklet
+ * @tasklet_lock: Spin lock
+ * @tasklet_active: Atomic variable to control tasklet state
+ * @tasklet: Tasklet structure used to schedule bottom half
+ * @free_cmd_list: List of free tasklet queue cmd for use
+ * @used_cmd_list: List of used tasklet queue cmd
+ * @cmd_queue: Array of tasklet cmd for storage
+ * @ctx_priv: Private data passed to the handling function
+ *
+ */
+struct cam_tasklet_info {
+ struct list_head list;
+ uint32_t index;
+ spinlock_t tasklet_lock;
+ atomic_t tasklet_active;
+ struct tasklet_struct tasklet;
+
+ struct list_head free_cmd_list;
+ struct list_head used_cmd_list;
+ struct cam_tasklet_queue_cmd cmd_queue[CAM_TASKLETQ_SIZE];
+
+ void *ctx_priv;
+};
+
+struct cam_irq_bh_api tasklet_bh_api = {
+ .bottom_half_enqueue_func = cam_tasklet_enqueue_cmd,
+ .get_bh_payload_func = cam_tasklet_get_cmd,
+ .put_bh_payload_func = cam_tasklet_put_cmd,
+};
+
+int cam_tasklet_get_cmd(
+ void *bottom_half,
+ void **bh_cmd)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct cam_tasklet_info *tasklet = bottom_half;
+ struct cam_tasklet_queue_cmd *tasklet_cmd = NULL;
+
+ *bh_cmd = NULL;
+
+ if (tasklet == NULL) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "tasklet is NULL");
+ return -EINVAL;
+ }
+
+ if (!atomic_read(&tasklet->tasklet_active)) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Tasklet is not active");
+ rc = -EPIPE;
+ return rc;
+ }
+
+ spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+ if (list_empty(&tasklet->free_cmd_list)) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "No more free tasklet cmd");
+ rc = -ENODEV;
+ goto spin_unlock;
+ } else {
+ tasklet_cmd = list_first_entry(&tasklet->free_cmd_list,
+ struct cam_tasklet_queue_cmd, list);
+ list_del_init(&(tasklet_cmd)->list);
+ *bh_cmd = tasklet_cmd;
+ }
+
+spin_unlock:
+ spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+ return rc;
+}
+
+void cam_tasklet_put_cmd(
+ void *bottom_half,
+ void **bh_cmd)
+{
+ unsigned long flags;
+ struct cam_tasklet_info *tasklet = bottom_half;
+ struct cam_tasklet_queue_cmd *tasklet_cmd = *bh_cmd;
+
+ if (tasklet == NULL) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "tasklet is NULL");
+ return;
+ }
+
+ if (tasklet_cmd == NULL) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid tasklet_cmd");
+ return;
+ }
+
+ spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+ list_del_init(&tasklet_cmd->list);
+ list_add_tail(&tasklet_cmd->list, &tasklet->free_cmd_list);
+ *bh_cmd = NULL;
+ spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+}
+
+/**
+ * cam_tasklet_dequeue_cmd()
+ *
+ * @brief: Dequeue the oldest pending cmd from the used list
+ *
+ * @tasklet: Tasklet info structure holding the used and
+ * free cmd lists
+ * @tasklet_cmd: Output pointer; set to the dequeued cmd on
+ * success, NULL when the used list is empty
+ *
+ * @return: 0: Success
+ * -ENODEV: Failure, used list is empty (no more
+ * pending work)
+ */
+static int cam_tasklet_dequeue_cmd(
+ struct cam_tasklet_info *tasklet,
+ struct cam_tasklet_queue_cmd **tasklet_cmd)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ *tasklet_cmd = NULL;
+
+ CAM_DBG(CAM_ISP, "Dequeue before lock.");
+ spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+ if (list_empty(&tasklet->used_cmd_list)) {
+ CAM_DBG(CAM_ISP, "End of list reached. Exit");
+ rc = -ENODEV;
+ goto spin_unlock;
+ } else {
+ *tasklet_cmd = list_first_entry(&tasklet->used_cmd_list,
+ struct cam_tasklet_queue_cmd, list);
+ list_del_init(&(*tasklet_cmd)->list);
+ CAM_DBG(CAM_ISP, "Dequeue Successful");
+ }
+
+spin_unlock:
+ spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+ return rc;
+}
+
+void cam_tasklet_enqueue_cmd(
+ void *bottom_half,
+ void *bh_cmd,
+ void *handler_priv,
+ void *evt_payload_priv,
+ CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler)
+{
+ unsigned long flags;
+ struct cam_tasklet_queue_cmd *tasklet_cmd = bh_cmd;
+ struct cam_tasklet_info *tasklet = bottom_half;
+
+ if (!bottom_half) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "NULL bottom half");
+ return;
+ }
+
+ if (!bh_cmd) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "NULL bh cmd");
+ return;
+ }
+
+ if (!atomic_read(&tasklet->tasklet_active)) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Tasklet is not active\n");
+ return;
+ }
+
+ CAM_DBG(CAM_ISP, "Enqueue tasklet cmd");
+ tasklet_cmd->bottom_half_handler = bottom_half_handler;
+ tasklet_cmd->payload = evt_payload_priv;
+ spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+ list_add_tail(&tasklet_cmd->list,
+ &tasklet->used_cmd_list);
+ spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+ tasklet_schedule(&tasklet->tasklet);
+}
+
+int cam_tasklet_init(
+ void **tasklet_info,
+ void *hw_mgr_ctx,
+ uint32_t idx)
+{
+ int i;
+ struct cam_tasklet_info *tasklet = NULL;
+
+ tasklet = kzalloc(sizeof(struct cam_tasklet_info), GFP_KERNEL);
+ if (!tasklet) {
+ CAM_DBG(CAM_ISP,
+ "Error! Unable to allocate memory for tasklet");
+ *tasklet_info = NULL;
+ return -ENOMEM;
+ }
+
+ tasklet->ctx_priv = hw_mgr_ctx;
+ tasklet->index = idx;
+ spin_lock_init(&tasklet->tasklet_lock);
+ memset(tasklet->cmd_queue, 0, sizeof(tasklet->cmd_queue));
+ INIT_LIST_HEAD(&tasklet->free_cmd_list);
+ INIT_LIST_HEAD(&tasklet->used_cmd_list);
+ for (i = 0; i < CAM_TASKLETQ_SIZE; i++) {
+ INIT_LIST_HEAD(&tasklet->cmd_queue[i].list);
+ list_add_tail(&tasklet->cmd_queue[i].list,
+ &tasklet->free_cmd_list);
+ }
+ tasklet_init(&tasklet->tasklet, cam_tasklet_action,
+ (unsigned long)tasklet);
+ tasklet_disable(&tasklet->tasklet);
+
+ *tasklet_info = tasklet;
+
+ return 0;
+}
+
+void cam_tasklet_deinit(void **tasklet_info)
+{
+ struct cam_tasklet_info *tasklet = *tasklet_info;
+
+ if (atomic_read(&tasklet->tasklet_active)) {
+ atomic_set(&tasklet->tasklet_active, 0);
+ tasklet_kill(&tasklet->tasklet);
+ tasklet_disable(&tasklet->tasklet);
+ }
+ kfree(tasklet);
+ *tasklet_info = NULL;
+}
+
+static inline void cam_tasklet_flush(struct cam_tasklet_info *tasklet_info)
+{
+ cam_tasklet_action((unsigned long) tasklet_info);
+}
+
+int cam_tasklet_start(void *tasklet_info)
+{
+ struct cam_tasklet_info *tasklet = tasklet_info;
+ int i = 0;
+
+ if (atomic_read(&tasklet->tasklet_active)) {
+ CAM_ERR(CAM_ISP, "Tasklet already active. idx = %d",
+ tasklet->index);
+ return -EBUSY;
+ }
+
+ /* clean up the command queue first */
+ for (i = 0; i < CAM_TASKLETQ_SIZE; i++) {
+ list_del_init(&tasklet->cmd_queue[i].list);
+ list_add_tail(&tasklet->cmd_queue[i].list,
+ &tasklet->free_cmd_list);
+ }
+
+ atomic_set(&tasklet->tasklet_active, 1);
+
+ tasklet_enable(&tasklet->tasklet);
+
+ return 0;
+}
+
+void cam_tasklet_stop(void *tasklet_info)
+{
+ struct cam_tasklet_info *tasklet = tasklet_info;
+
+ atomic_set(&tasklet->tasklet_active, 0);
+ tasklet_kill(&tasklet->tasklet);
+ tasklet_disable(&tasklet->tasklet);
+ cam_tasklet_flush(tasklet);
+}
+
+/*
+ * cam_tasklet_action()
+ *
+ * @brief: Process function that will be called when tasklet runs
+ * on CPU
+ *
+ * @data: Tasklet Info structure that is passed in tasklet_init
+ *
+ * @return: Void
+ */
+static void cam_tasklet_action(unsigned long data)
+{
+ struct cam_tasklet_info *tasklet_info = NULL;
+ struct cam_tasklet_queue_cmd *tasklet_cmd = NULL;
+
+ tasklet_info = (struct cam_tasklet_info *)data;
+
+ while (!cam_tasklet_dequeue_cmd(tasklet_info, &tasklet_cmd)) {
+ tasklet_cmd->bottom_half_handler(tasklet_info->ctx_priv,
+ tasklet_cmd->payload);
+ cam_tasklet_put_cmd(tasklet_info, (void **)(&tasklet_cmd));
+ }
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
new file mode 100644
index 000000000000..9c1caa03e06c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -0,0 +1,162 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_HW_PARSER_H_
+#define _CAM_ISP_HW_PARSER_H_
+
+#include <linux/types.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_hw_intf.h"
+#include "cam_packet_util.h"
+
+/*
+ * struct cam_isp_generic_blob_info
+ *
+ * @prepare:      Payload for prepare command
+ * @base_info:    Base hardware information for the context
+ * @kmd_buf_info: Kmd buffer to store the custom cmd data
+ */
+struct cam_isp_generic_blob_info {
+ struct cam_hw_prepare_update_args *prepare;
+ struct ctx_base_info *base_info;
+ struct cam_kmd_buf_info *kmd_buf_info;
+};
+
+/*
+ * cam_isp_add_change_base()
+ *
+ * @brief Add change base in the hw entries list
+ * process the ISP source list and get the change base from
+ * the ISP HW instance
+ *
+ * @prepare: Contain the packet and HW update variables
+ * @res_list_isp_src: Resource list for IFE/VFE source
+ * @base_idx: Base or dev index of the IFE/VFE HW instance for
+ * which change base needs to be added
+ * @kmd_buf_info: Kmd buffer to store the change base command
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_isp_add_change_base(
+ struct cam_hw_prepare_update_args *prepare,
+ struct list_head *res_list_isp_src,
+ uint32_t base_idx,
+ struct cam_kmd_buf_info *kmd_buf_info);
+
+/*
+ * cam_isp_add_cmd_buf_update()
+ *
+ * @brief Add command buffer in the HW entries list for given
+ * Blob Data.
+ *
+ * @hw_mgr_res: HW resource to get the update from
+ * @cmd_type: Cmd type to get update for
+ * @hw_cmd_type: HW Cmd type corresponding to cmd_type
+ * @base_idx: Base hardware index
+ * @cmd_buf_addr: Cpu buf addr of kmd scratch buffer
+ * @kmd_buf_remain_size: Remaining size left for cmd buffer update
+ * @cmd_update_data: Data needed by HW to process the cmd and provide
+ * cmd buffer
+ * @bytes_used: Address of the field to be populated with
+ * total bytes used as output to caller
+ *
+ * @return: Negative for Failure
+ * otherwise returns bytes used
+ */
+int cam_isp_add_cmd_buf_update(
+ struct cam_ife_hw_mgr_res *hw_mgr_res,
+ uint32_t cmd_type,
+ uint32_t hw_cmd_type,
+ uint32_t base_idx,
+ uint32_t *cmd_buf_addr,
+ uint32_t kmd_buf_remain_size,
+ void *cmd_update_data,
+ uint32_t *bytes_used);
+
+/*
+ * cam_isp_add_command_buffers()
+ *
+ * @brief Add command buffer in the HW entries list for given
+ * left or right VFE/IFE instance.
+ *
+ * @prepare: Contain the packet and HW update variables
+ * @kmd_buf_info: KMD buffer to store the custom cmd data
+ * @base_info: base hardware information
+ * @blob_handler_cb: Call_back_function for Meta handling
+ * @res_list_isp_out: IFE /VFE out resource list
+ * @size_isp_out: Size of the res_list_isp_out array
+ *
+ * @return: 0 for success
+ * Negative for Failure
+ */
+int cam_isp_add_command_buffers(
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ struct ctx_base_info *base_info,
+ cam_packet_generic_blob_handler blob_handler_cb,
+ struct cam_ife_hw_mgr_res *res_list_isp_out,
+ uint32_t size_isp_out);
+
+/*
+ * cam_isp_add_io_buffers()
+ *
+ * @brief Add io buffer configurations in the HW entries list
+ * process the io configurations based on the base
+ * index and update the HW entries list
+ *
+ * @iommu_hdl: Iommu handle to get the IO buf from memory manager
+ * @sec_iommu_hdl: Secure iommu handle to get the IO buf from
+ * memory manager
+ * @prepare: Contain the packet and HW update variables
+ * @base_idx: Base or dev index of the IFE/VFE HW instance
+ * @kmd_buf_info: Kmd buffer to store the change base command
+ * @res_list_isp_out: IFE /VFE out resource list
+ * @size_isp_out: Size of the res_list_isp_out array
+ * @fill_fence: If true, Fence map table will be filled
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_isp_add_io_buffers(
+ int iommu_hdl,
+ int sec_iommu_hdl,
+ struct cam_hw_prepare_update_args *prepare,
+ uint32_t base_idx,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ struct cam_ife_hw_mgr_res *res_list_isp_out,
+ uint32_t size_isp_out,
+ bool fill_fence);
+
+/*
+ * cam_isp_add_reg_update()
+ *
+ * @brief Add reg update in the hw entries list
+ * process the ISP source list and get the reg update from
+ * the ISP HW instance
+ *
+ * @prepare: Contain the packet and HW update variables
+ * @res_list_isp_src: Resource list for IFE/VFE source
+ * @base_idx: Base or dev index of the IFE/VFE HW instance
+ * @kmd_buf_info: Kmd buffer to store the change base command
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_isp_add_reg_update(
+ struct cam_hw_prepare_update_args *prepare,
+ struct list_head *res_list_isp_src,
+ uint32_t base_idx,
+ struct cam_kmd_buf_info *kmd_buf_info);
+
+#endif /* _CAM_ISP_HW_PARSER_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h
new file mode 100644
index 000000000000..8bd93d81f620
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h
@@ -0,0 +1,123 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_TASKLET_UTIL_H_
+#define _CAM_TASKLET_UTIL_H_
+
+#include "cam_irq_controller.h"
+
+/*
+ * cam_tasklet_init()
+ *
+ * @brief: Initialize the tasklet info structure
+ *
+ * @tasklet: Tasklet to initialize
+ * @hw_mgr_ctx: Private Ctx data that will be passed to the handler
+ * function
+ * @idx: Index of tasklet used as identity
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_tasklet_init(
+ void **tasklet,
+ void *hw_mgr_ctx,
+ uint32_t idx);
+
+/*
+ * cam_tasklet_deinit()
+ *
+ * @brief: Deinitialize the tasklet info structure
+ *
+ * @tasklet: Tasklet to deinitialize
+ *
+ * @return: Void
+ */
+void cam_tasklet_deinit(void **tasklet);
+
+/*
+ * cam_tasklet_start()
+ *
+ * @brief: Enable the tasklet to be scheduled and run.
+ * Caller should make sure this function is called
+ * before trying to enqueue.
+ *
+ * @tasklet: Tasklet to start
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_tasklet_start(void *tasklet);
+
+/*
+ * cam_tasklet_stop()
+ *
+ * @brief: Disable the tasklet so it can no longer be scheduled.
+ * Need to enable again to run.
+ *
+ * @tasklet: Tasklet to stop
+ *
+ * @return: Void
+ */
+void cam_tasklet_stop(void *tasklet);
+
+/*
+ * cam_tasklet_enqueue_cmd()
+ *
+ * @brief: Enqueue the tasklet_cmd to used list
+ *
+ * @bottom_half: Tasklet info to enqueue onto
+ * @bh_cmd: Tasklet cmd used to enqueue task
+ * @handler_priv: Private Handler data that will be passed to the
+ * handler function
+ * @evt_payload_priv: Event payload that will be passed to the handler
+ * function
+ * @bottom_half_handler: Callback function that will be called by tasklet
+ * for handling event
+ *
+ * @return: Void
+ */
+void cam_tasklet_enqueue_cmd(
+ void *bottom_half,
+ void *bh_cmd,
+ void *handler_priv,
+ void *evt_payload_priv,
+ CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler);
+
+/**
+ * cam_tasklet_get_cmd()
+ *
+ * @brief: Get free cmd from tasklet
+ *
+ * @bottom_half: Tasklet Info structure to get cmd from
+ * @bh_cmd: Return tasklet_cmd pointer if successful
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_tasklet_get_cmd(void *bottom_half, void **bh_cmd);
+
+/**
+ * cam_tasklet_put_cmd()
+ *
+ * @brief: Put back cmd to free list
+ *
+ * @bottom_half: Tasklet Info structure to put cmd into
+ * @bh_cmd: tasklet_cmd pointer that needs to be put back
+ *
+ * @return: Void
+ */
+void cam_tasklet_put_cmd(void *bottom_half, void **bh_cmd);
+
+extern struct cam_irq_bh_api tasklet_bh_api;
+
+#endif /* _CAM_TASKLET_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/Makefile
new file mode 100644
index 000000000000..eeb7ddee97e1
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_irq_controller.o \ No newline at end of file
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
new file mode 100644
index 000000000000..e418fa97081d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -0,0 +1,737 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/ratelimit.h>
+
+#include "cam_io_util.h"
+#include "cam_irq_controller.h"
+#include "cam_debug_util.h"
+
+/**
+ * struct cam_irq_evt_handler:
+ * @Brief: Event handler information
+ *
+ * @priority: Priority level of this event
+ * @evt_bit_mask_arr: evt_bit_mask that has the bits set for IRQs to
+ * subscribe for
+ * @handler_priv: Private data that will be passed to the Top/Bottom
+ * Half handler function
+ * @top_half_handler: Top half Handler callback function
+ * @bottom_half_handler: Bottom half Handler callback function
+ * @bottom_half: Pointer to bottom_half implementation on which to
+ * enqueue the event for further handling
+ * @irq_bh_api:          Bottom half API (get/put payload and enqueue
+ *                       functions) used to hand events to the bottom half
+ * @list_node:           list_head struct used for overall handler List
+ * @th_list_node:        list_head struct used for top half handler List
+ * @index:               Unique handle assigned on subscribe, used to
+ *                       enable/disable/unsubscribe this handler
+ */
+struct cam_irq_evt_handler {
+ enum cam_irq_priority_level priority;
+ uint32_t *evt_bit_mask_arr;
+ void *handler_priv;
+ CAM_IRQ_HANDLER_TOP_HALF top_half_handler;
+ CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler;
+ void *bottom_half;
+ struct cam_irq_bh_api irq_bh_api;
+ struct list_head list_node;
+ struct list_head th_list_node;
+ int index;
+};
+
+/**
+ * struct cam_irq_register_obj:
+ * @Brief: Structure containing information related to
+ * a particular register Set
+ *
+ * @index: Index of set in Array
+ * @mask_reg_offset: Offset of IRQ MASK register
+ * @clear_reg_offset: Offset of IRQ CLEAR register
+ * @status_reg_offset: Offset of IRQ STATUS register
+ * @top_half_enable_mask: Array of enabled bit_mask sorted by priority
+ */
+struct cam_irq_register_obj {
+ uint32_t index;
+ uint32_t mask_reg_offset;
+ uint32_t clear_reg_offset;
+ uint32_t status_reg_offset;
+ uint32_t top_half_enable_mask[CAM_IRQ_PRIORITY_MAX];
+};
+
+/**
+ * struct cam_irq_controller:
+ *
+ * @brief: IRQ Controller structure.
+ *
+ * @name: Name of IRQ Controller block
+ * @mem_base: Mapped base address of register space to which
+ * register offsets are added to access registers
+ * @num_registers: Number of sets(mask/clear/status) of IRQ registers
+ * @irq_register_arr: Array of Register object associated with this
+ * Controller
+ * @irq_status_arr: Array of IRQ Status values
+ * @global_clear_offset: Offset of Global IRQ Clear register. This register
+ * contains the BIT that needs to be set for the CLEAR
+ * to take effect
+ * @global_clear_bitmask: Bitmask needed to be used in Global Clear register
+ * for Clear IRQ cmd to take effect
+ * @evt_handler_list_head: List of all event handlers
+ * @th_list_head: List of handlers sorted by priority
+ * @hdl_idx: Unique identity of handler assigned on Subscribe.
+ * Used to Unsubscribe.
+ * @lock:                  Lock for use by controller
+ * @th_payload:            Scratch payload (status values) passed to the
+ *                         top half handlers
+ */
+struct cam_irq_controller {
+ const char *name;
+ void __iomem *mem_base;
+ uint32_t num_registers;
+ struct cam_irq_register_obj *irq_register_arr;
+ uint32_t *irq_status_arr;
+ uint32_t global_clear_offset;
+ uint32_t global_clear_bitmask;
+ struct list_head evt_handler_list_head;
+ struct list_head th_list_head[CAM_IRQ_PRIORITY_MAX];
+ uint32_t hdl_idx;
+ spinlock_t lock;
+ struct cam_irq_th_payload th_payload;
+};
+
+/*
+ * cam_irq_controller_deinit()
+ *
+ * @brief:          Free all still-subscribed event handlers and the
+ *                  controller's internal arrays, then the controller
+ *                  itself; NULLs the caller's pointer.
+ *
+ * @irq_controller: Address of the controller pointer; set to NULL on
+ *                  success so the freed handle cannot be reused
+ *
+ * @return:         0 for success, -EINVAL for a NULL handle
+ */
+int cam_irq_controller_deinit(void **irq_controller)
+{
+	struct cam_irq_controller *controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+
+	/* Guard the handle, consistent with the other entry points
+	 * (enable/disable) that validate controller before use.
+	 */
+	if (!irq_controller || !*irq_controller) {
+		CAM_ERR(CAM_IRQ_CTRL, "Invalid controller handle");
+		return -EINVAL;
+	}
+	controller = *irq_controller;
+
+	/* Release any handlers that were never unsubscribed */
+	while (!list_empty(&controller->evt_handler_list_head)) {
+		evt_handler = list_first_entry(
+			&controller->evt_handler_list_head,
+			struct cam_irq_evt_handler, list_node);
+		list_del_init(&evt_handler->list_node);
+		kfree(evt_handler->evt_bit_mask_arr);
+		kfree(evt_handler);
+	}
+
+	kfree(controller->th_payload.evt_status_arr);
+	kfree(controller->irq_status_arr);
+	kfree(controller->irq_register_arr);
+	kfree(controller);
+	*irq_controller = NULL;
+	return 0;
+}
+
+/*
+ * cam_irq_controller_init()
+ *
+ * @brief:          Allocate and initialize an IRQ controller from the
+ *                  given register description.
+ *
+ * @name:           Name of the controller block (pointer is stored, not
+ *                  copied; caller must keep it alive)
+ * @mem_base:       Mapped register base used for all mask/clear/status
+ *                  accesses
+ * @register_info:  Number of register sets and their offsets
+ * @irq_controller: Output; receives the new controller on success,
+ *                  NULL on failure
+ *
+ * @return:         0 on success, negative errno on failure
+ */
+int cam_irq_controller_init(const char *name,
+	void __iomem *mem_base,
+	struct cam_irq_controller_reg_info *register_info,
+	void **irq_controller)
+{
+	struct cam_irq_controller *controller = NULL;
+	int i, rc = 0;
+
+	*irq_controller = NULL;
+
+	if (!register_info->num_registers || !register_info->irq_reg_set ||
+		!name || !mem_base) {
+		CAM_ERR(CAM_IRQ_CTRL, "Invalid parameters");
+		rc = -EINVAL;
+		return rc;
+	}
+
+	controller = kzalloc(sizeof(struct cam_irq_controller), GFP_KERNEL);
+	if (!controller) {
+		CAM_DBG(CAM_IRQ_CTRL, "Failed to allocate IRQ Controller");
+		return -ENOMEM;
+	}
+
+	/* kcalloc checks count * size for overflow, unlike an open-coded
+	 * kzalloc(n * size) multiplication
+	 */
+	controller->irq_register_arr = kcalloc(register_info->num_registers,
+		sizeof(struct cam_irq_register_obj), GFP_KERNEL);
+	if (!controller->irq_register_arr) {
+		CAM_DBG(CAM_IRQ_CTRL, "Failed to allocate IRQ register Arr");
+		rc = -ENOMEM;
+		goto reg_alloc_error;
+	}
+
+	controller->irq_status_arr = kcalloc(register_info->num_registers,
+		sizeof(uint32_t), GFP_KERNEL);
+	if (!controller->irq_status_arr) {
+		CAM_DBG(CAM_IRQ_CTRL, "Failed to allocate IRQ status Arr");
+		rc = -ENOMEM;
+		goto status_alloc_error;
+	}
+
+	controller->th_payload.evt_status_arr =
+		kcalloc(register_info->num_registers, sizeof(uint32_t),
+		GFP_KERNEL);
+	if (!controller->th_payload.evt_status_arr) {
+		CAM_DBG(CAM_IRQ_CTRL,
+			"Failed to allocate BH payload bit mask Arr");
+		rc = -ENOMEM;
+		goto evt_mask_alloc_error;
+	}
+
+	controller->name = name;
+
+	CAM_DBG(CAM_IRQ_CTRL, "num_registers: %d",
+		register_info->num_registers);
+	/* Copy the per-set register offsets from the caller's layout */
+	for (i = 0; i < register_info->num_registers; i++) {
+		controller->irq_register_arr[i].index = i;
+		controller->irq_register_arr[i].mask_reg_offset =
+			register_info->irq_reg_set[i].mask_reg_offset;
+		controller->irq_register_arr[i].clear_reg_offset =
+			register_info->irq_reg_set[i].clear_reg_offset;
+		controller->irq_register_arr[i].status_reg_offset =
+			register_info->irq_reg_set[i].status_reg_offset;
+		CAM_DBG(CAM_IRQ_CTRL, "i %d mask_reg_offset: 0x%x", i,
+			controller->irq_register_arr[i].mask_reg_offset);
+		CAM_DBG(CAM_IRQ_CTRL, "i %d clear_reg_offset: 0x%x", i,
+			controller->irq_register_arr[i].clear_reg_offset);
+		CAM_DBG(CAM_IRQ_CTRL, "i %d status_reg_offset: 0x%x", i,
+			controller->irq_register_arr[i].status_reg_offset);
+	}
+	controller->num_registers = register_info->num_registers;
+	controller->global_clear_bitmask = register_info->global_clear_bitmask;
+	controller->global_clear_offset = register_info->global_clear_offset;
+	controller->mem_base = mem_base;
+
+	CAM_DBG(CAM_IRQ_CTRL, "global_clear_bitmask: 0x%x",
+		controller->global_clear_bitmask);
+	CAM_DBG(CAM_IRQ_CTRL, "global_clear_offset: 0x%x",
+		controller->global_clear_offset);
+	CAM_DBG(CAM_IRQ_CTRL, "mem_base: %pK",
+		(void __iomem *)controller->mem_base);
+
+	INIT_LIST_HEAD(&controller->evt_handler_list_head);
+	for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++)
+		INIT_LIST_HEAD(&controller->th_list_head[i]);
+
+	spin_lock_init(&controller->lock);
+
+	/* Handle 0 is never issued; subscribe returns indices >= 1 */
+	controller->hdl_idx = 1;
+	*irq_controller = controller;
+
+	return rc;
+
+evt_mask_alloc_error:
+	kfree(controller->irq_status_arr);
+status_alloc_error:
+	kfree(controller->irq_register_arr);
+reg_alloc_error:
+	kfree(controller);
+
+	return rc;
+}
+
+/*
+ * cam_irq_controller_subscribe_irq()
+ *
+ * @brief:  Register a handler for the IRQ bits in evt_bit_mask_arr at
+ *          the given priority level, enable those bits in the HW MASK
+ *          registers, and return a positive handle used later for
+ *          enable/disable/unsubscribe.
+ *
+ * @return: Positive handle on success, negative errno on failure
+ */
+int cam_irq_controller_subscribe_irq(void *irq_controller,
+	enum cam_irq_priority_level priority,
+	uint32_t *evt_bit_mask_arr,
+	void *handler_priv,
+	CAM_IRQ_HANDLER_TOP_HALF top_half_handler,
+	CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler,
+	void *bottom_half,
+	struct cam_irq_bh_api *irq_bh_api)
+{
+	struct cam_irq_controller *controller = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	int i;
+	int rc = 0;
+	uint32_t irq_mask;
+	unsigned long flags = 0;
+	bool need_lock;
+
+	if (!controller || !handler_priv || !evt_bit_mask_arr) {
+		CAM_ERR(CAM_IRQ_CTRL,
+			"Inval params: ctlr=%pK hdl_priv=%pK bit_mask_arr=%pK",
+			controller, handler_priv, evt_bit_mask_arr);
+		return -EINVAL;
+	}
+
+	if (!top_half_handler) {
+		CAM_ERR(CAM_IRQ_CTRL, "Missing top half handler");
+		return -EINVAL;
+	}
+
+	/* A bottom half handler requires both a bottom half context and
+	 * the get/put/enqueue API that goes with it
+	 */
+	if (bottom_half_handler &&
+		(!bottom_half || !irq_bh_api)) {
+		CAM_ERR(CAM_IRQ_CTRL,
+			"Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK",
+			bottom_half_handler,
+			bottom_half,
+			irq_bh_api);
+		return -EINVAL;
+	}
+
+	if (irq_bh_api &&
+		(!irq_bh_api->bottom_half_enqueue_func ||
+		!irq_bh_api->get_bh_payload_func ||
+		!irq_bh_api->put_bh_payload_func)) {
+		CAM_ERR(CAM_IRQ_CTRL,
+			"Invalid: enqueue_func=%pK get_bh=%pK put_bh=%pK",
+			irq_bh_api->bottom_half_enqueue_func,
+			irq_bh_api->get_bh_payload_func,
+			irq_bh_api->put_bh_payload_func);
+		return -EINVAL;
+	}
+
+	if (priority >= CAM_IRQ_PRIORITY_MAX) {
+		CAM_ERR(CAM_IRQ_CTRL, "Invalid priority=%u, max=%u", priority,
+			CAM_IRQ_PRIORITY_MAX);
+		return -EINVAL;
+	}
+
+	evt_handler = kzalloc(sizeof(struct cam_irq_evt_handler), GFP_KERNEL);
+	if (!evt_handler) {
+		CAM_DBG(CAM_IRQ_CTRL, "Error allocating hlist_node");
+		return -ENOMEM;
+	}
+
+	/* Private copy of the caller's mask, one word per register set */
+	evt_handler->evt_bit_mask_arr = kzalloc(sizeof(uint32_t) *
+		controller->num_registers, GFP_KERNEL);
+	if (!evt_handler->evt_bit_mask_arr) {
+		CAM_DBG(CAM_IRQ_CTRL, "Error allocating hlist_node");
+		rc = -ENOMEM;
+		goto free_evt_handler;
+	}
+
+	INIT_LIST_HEAD(&evt_handler->list_node);
+	INIT_LIST_HEAD(&evt_handler->th_list_node);
+
+	for (i = 0; i < controller->num_registers; i++)
+		evt_handler->evt_bit_mask_arr[i] = evt_bit_mask_arr[i];
+
+	evt_handler->priority = priority;
+	evt_handler->handler_priv = handler_priv;
+	evt_handler->top_half_handler = top_half_handler;
+	evt_handler->bottom_half_handler = bottom_half_handler;
+	evt_handler->bottom_half = bottom_half;
+	evt_handler->index = controller->hdl_idx++;
+
+	if (irq_bh_api)
+		evt_handler->irq_bh_api = *irq_bh_api;
+
+	/* Avoid rollover to negative values */
+	if (controller->hdl_idx > 0x3FFFFFFF)
+		controller->hdl_idx = 1;
+
+	/* Skip the lock in hard-IRQ context: the IRQ handler path
+	 * (cam_irq_controller_handle_irq) already holds controller->lock
+	 */
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+	/* Enable the subscribed bits in the per-priority SW mask and the
+	 * HW MASK register of every register set
+	 */
+	for (i = 0; i < controller->num_registers; i++) {
+		controller->irq_register_arr[i].top_half_enable_mask[priority]
+			|= evt_bit_mask_arr[i];
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			controller->irq_register_arr[i].mask_reg_offset);
+		irq_mask |= evt_bit_mask_arr[i];
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+			controller->irq_register_arr[i].mask_reg_offset);
+	}
+
+	list_add_tail(&evt_handler->list_node,
+		&controller->evt_handler_list_head);
+	list_add_tail(&evt_handler->th_list_node,
+		&controller->th_list_head[priority]);
+
+	if (need_lock)
+		spin_unlock_irqrestore(&controller->lock, flags);
+
+	return evt_handler->index;
+
+free_evt_handler:
+	kfree(evt_handler);
+	evt_handler = NULL;
+
+	return rc;
+}
+
+/*
+ * cam_irq_controller_enable_irq()
+ *
+ * @brief:          Re-enable the IRQ bits of a subscribed handler: set
+ *                  its bits in the per-priority top half enable mask
+ *                  and in the HW MASK register of every register set.
+ *
+ * @irq_controller: Controller from cam_irq_controller_init()
+ * @handle:         Handle from cam_irq_controller_subscribe_irq()
+ *
+ * @return:         0 on success, -EINVAL if the handle is not found or
+ *                  the controller is NULL
+ */
+int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle)
+{
+	struct cam_irq_controller *controller = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	struct cam_irq_evt_handler *evt_handler_temp;
+	struct cam_irq_register_obj *irq_register = NULL;
+	enum cam_irq_priority_level priority;
+	unsigned long flags = 0;
+	unsigned int i;
+	uint32_t irq_mask;
+	uint32_t found = 0;
+	int rc = -EINVAL;
+	bool need_lock;
+
+	if (!controller)
+		return rc;
+
+	/* Skip the lock in hard-IRQ context: the IRQ handler path
+	 * (cam_irq_controller_handle_irq) already holds controller->lock
+	 */
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+
+	/* Locate the subscribed handler for this handle */
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CAM_DBG(CAM_IRQ_CTRL, "enable item %d", handle);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (!found) {
+		if (need_lock)
+			spin_unlock_irqrestore(&controller->lock, flags);
+		return rc;
+	}
+
+	priority = evt_handler->priority;
+	/* OR the handler's bits into the SW mask and HW MASK registers */
+	for (i = 0; i < controller->num_registers; i++) {
+		irq_register = &controller->irq_register_arr[i];
+		irq_register->top_half_enable_mask[priority] |=
+			evt_handler->evt_bit_mask_arr[i];
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			irq_register->mask_reg_offset);
+		irq_mask |= evt_handler->evt_bit_mask_arr[i];
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+			controller->irq_register_arr[i].mask_reg_offset);
+	}
+	if (need_lock)
+		spin_unlock_irqrestore(&controller->lock, flags);
+
+	return rc;
+}
+
+/*
+ * cam_irq_controller_disable_irq()
+ *
+ * @brief:          Disable the IRQ bits of a subscribed handler: clear
+ *                  its bits from the per-priority top half enable mask
+ *                  and the HW MASK registers, then clear any pending
+ *                  status for those bits. The handler stays subscribed.
+ *
+ * @irq_controller: Controller from cam_irq_controller_init()
+ * @handle:         Handle from cam_irq_controller_subscribe_irq()
+ *
+ * @return:         0 on success, -EINVAL if the handle is not found or
+ *                  the controller is NULL
+ */
+int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle)
+{
+	struct cam_irq_controller *controller = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	struct cam_irq_evt_handler *evt_handler_temp;
+	struct cam_irq_register_obj *irq_register;
+	enum cam_irq_priority_level priority;
+	unsigned long flags = 0;
+	unsigned int i;
+	uint32_t irq_mask;
+	uint32_t found = 0;
+	int rc = -EINVAL;
+	bool need_lock;
+
+	if (!controller)
+		return rc;
+
+	/* Skip the lock in hard-IRQ context: the IRQ handler path
+	 * (cam_irq_controller_handle_irq) already holds controller->lock
+	 */
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+
+	/* Locate the subscribed handler for this handle */
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CAM_DBG(CAM_IRQ_CTRL, "disable item %d", handle);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (!found) {
+		if (need_lock)
+			spin_unlock_irqrestore(&controller->lock, flags);
+		return rc;
+	}
+
+	priority = evt_handler->priority;
+	for (i = 0; i < controller->num_registers; i++) {
+		irq_register = &controller->irq_register_arr[i];
+		irq_register->top_half_enable_mask[priority] &=
+			~(evt_handler->evt_bit_mask_arr[i]);
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			irq_register->mask_reg_offset);
+		CAM_DBG(CAM_IRQ_CTRL, "irq_mask 0x%x before disable 0x%x",
+			irq_register->mask_reg_offset, irq_mask);
+		irq_mask &= ~(evt_handler->evt_bit_mask_arr[i]);
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+			irq_register->mask_reg_offset);
+		CAM_DBG(CAM_IRQ_CTRL, "irq_mask 0x%x after disable 0x%x",
+			irq_register->mask_reg_offset, irq_mask);
+
+		/* Clear the IRQ bits of this handler */
+		cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
+			controller->mem_base +
+			irq_register->clear_reg_offset);
+
+		/* Strobe the global clear so the CLEAR write takes effect */
+		if (controller->global_clear_offset)
+			cam_io_w_mb(
+				controller->global_clear_bitmask,
+				controller->mem_base +
+				controller->global_clear_offset);
+	}
+	if (need_lock)
+		spin_unlock_irqrestore(&controller->lock, flags);
+
+	return rc;
+}
+
+/*
+ * cam_irq_controller_unsubscribe_irq()
+ *
+ * @brief:          Remove a subscribed handler: unlink it from both
+ *                  lists, drop its bits from the SW enable masks and
+ *                  the HW MASK registers, clear its pending bits, and
+ *                  free it.
+ *
+ * @irq_controller: Controller from cam_irq_controller_init()
+ * @handle:         Handle from cam_irq_controller_subscribe_irq()
+ *
+ * @return:         0 on success, -EINVAL if the handle is not found or
+ *                  the controller is NULL
+ */
+int cam_irq_controller_unsubscribe_irq(void *irq_controller,
+	uint32_t handle)
+{
+	struct cam_irq_controller *controller = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	struct cam_irq_evt_handler *evt_handler_temp;
+	struct cam_irq_register_obj *irq_register;
+	enum cam_irq_priority_level priority;
+	uint32_t i;
+	uint32_t found = 0;
+	uint32_t irq_mask;
+	unsigned long flags = 0;
+	int rc = -EINVAL;
+	bool need_lock;
+
+	/* NULL guard, consistent with the enable/disable entry points */
+	if (!controller)
+		return rc;
+
+	/* Skip the lock in hard-IRQ context: the IRQ handler path
+	 * (cam_irq_controller_handle_irq) already holds controller->lock
+	 */
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CAM_DBG(CAM_IRQ_CTRL, "unsubscribe item %d", handle);
+			list_del_init(&evt_handler->list_node);
+			list_del_init(&evt_handler->th_list_node);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (found) {
+		/* Read priority only after a handler was actually found:
+		 * when the list walk completes without a match, evt_handler
+		 * is not a valid entry and dereferencing it is undefined.
+		 */
+		priority = evt_handler->priority;
+		for (i = 0; i < controller->num_registers; i++) {
+			irq_register = &controller->irq_register_arr[i];
+			irq_register->top_half_enable_mask[priority] &=
+				~(evt_handler->evt_bit_mask_arr[i]);
+
+			irq_mask = cam_io_r_mb(controller->mem_base +
+				irq_register->mask_reg_offset);
+			irq_mask &= ~(evt_handler->evt_bit_mask_arr[i]);
+
+			cam_io_w_mb(irq_mask, controller->mem_base +
+				irq_register->mask_reg_offset);
+
+			/* Clear the IRQ bits of this handler */
+			cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
+				controller->mem_base +
+				irq_register->clear_reg_offset);
+			if (controller->global_clear_offset)
+				cam_io_w_mb(
+					controller->global_clear_bitmask,
+					controller->mem_base +
+					controller->global_clear_offset);
+		}
+
+		kfree(evt_handler->evt_bit_mask_arr);
+		kfree(evt_handler);
+	}
+
+	if (need_lock)
+		spin_unlock_irqrestore(&controller->lock, flags);
+
+	return rc;
+}
+
+/**
+ * cam_irq_controller_match_bit_mask()
+ *
+ * @Brief: This function checks if any of the enabled IRQ bits
+ * for a certain handler is Set in the Status values
+ * of the controller.
+ *
+ * @controller: IRQ Controller structure
+ * @evt_handler: Event handler structure
+ *
+ * @Return: True: If any interested IRQ Bit is Set
+ * False: Otherwise
+ */
+static bool cam_irq_controller_match_bit_mask(
+	struct cam_irq_controller *controller,
+	struct cam_irq_evt_handler *evt_handler)
+{
+	/* uint32_t index matches controller->num_registers and avoids a
+	 * signed/unsigned comparison
+	 */
+	uint32_t i;
+
+	for (i = 0; i < controller->num_registers; i++) {
+		if (evt_handler->evt_bit_mask_arr[i] &
+			controller->irq_status_arr[i])
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * cam_irq_controller_th_processing()
+ *
+ * @brief:        Run the top half handlers at one priority level: for
+ *                every handler whose subscribed bits match the latched
+ *                status, build the top half payload, invoke the top
+ *                half, and enqueue the bottom half if one is registered.
+ *                Called with controller->lock held from handle_irq.
+ *
+ * @controller:   IRQ Controller structure
+ * @th_list_head: Handler list for the priority level being serviced
+ */
+static void cam_irq_controller_th_processing(
+	struct cam_irq_controller *controller,
+	struct list_head *th_list_head)
+{
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	struct cam_irq_th_payload *th_payload = &controller->th_payload;
+	bool is_irq_match;
+	int rc = -EINVAL;
+	int i;
+	void *bh_cmd = NULL;
+	struct cam_irq_bh_api *irq_bh_api = NULL;
+
+	CAM_DBG(CAM_IRQ_CTRL, "Enter");
+
+	if (list_empty(th_list_head))
+		return;
+
+	list_for_each_entry(evt_handler, th_list_head, th_list_node) {
+		is_irq_match = cam_irq_controller_match_bit_mask(controller,
+			evt_handler);
+
+		if (!is_irq_match)
+			continue;
+
+		CAM_DBG(CAM_IRQ_CTRL, "match found");
+
+		/* Payload carries only the status bits this handler
+		 * subscribed to, one word per register set
+		 */
+		cam_irq_th_payload_init(th_payload);
+		th_payload->handler_priv = evt_handler->handler_priv;
+		th_payload->num_registers = controller->num_registers;
+		for (i = 0; i < controller->num_registers; i++) {
+			th_payload->evt_status_arr[i] =
+				controller->irq_status_arr[i] &
+				evt_handler->evt_bit_mask_arr[i];
+		}
+
+		irq_bh_api = &evt_handler->irq_bh_api;
+		bh_cmd = NULL;
+
+		/* Reserve a bottom half command up front so a successful
+		 * top half is never stranded without a queue slot
+		 */
+		if (evt_handler->bottom_half_handler) {
+			rc = irq_bh_api->get_bh_payload_func(
+				evt_handler->bottom_half, &bh_cmd);
+			if (rc || !bh_cmd) {
+				CAM_ERR_RATE_LIMIT(CAM_ISP,
+					"Can't get bh payload");
+				continue;
+			}
+		}
+
+		/*
+		 * irq_status_arr[0] is dummy argument passed. the entire
+		 * status array is passed in th_payload.
+		 */
+		if (evt_handler->top_half_handler)
+			rc = evt_handler->top_half_handler(
+				controller->irq_status_arr[0],
+				(void *)th_payload);
+
+		/* Top half failed: return the reserved command unused */
+		if (rc && bh_cmd) {
+			irq_bh_api->put_bh_payload_func(
+				evt_handler->bottom_half, &bh_cmd);
+			continue;
+		}
+
+		if (evt_handler->bottom_half_handler) {
+			CAM_DBG(CAM_IRQ_CTRL, "Enqueuing bottom half for %s",
+				controller->name);
+			irq_bh_api->bottom_half_enqueue_func(
+				evt_handler->bottom_half,
+				bh_cmd,
+				evt_handler->handler_priv,
+				th_payload->evt_payload_priv,
+				evt_handler->bottom_half_handler);
+		}
+	}
+
+	CAM_DBG(CAM_IRQ_CTRL, "Exit");
+}
+
+/*
+ * cam_irq_controller_clear_and_mask()
+ *
+ * @brief:   Quiesce the controller from IRQ context: write the CLEAR
+ *           registers, strobe the global clear, then zero every MASK
+ *           register so no further interrupts are delivered.
+ *
+ * @irq_num: IRQ line number (unused here)
+ * @priv:    Controller pointer registered with the IRQ line
+ *
+ * @return:  IRQ_HANDLED, or IRQ_NONE if priv is NULL
+ */
+irqreturn_t cam_irq_controller_clear_and_mask(int irq_num, void *priv)
+{
+	struct cam_irq_controller *controller = priv;
+	uint32_t i = 0;
+
+	if (!controller)
+		return IRQ_NONE;
+
+	for (i = 0; i < controller->num_registers; i++) {
+
+		/* NOTE(review): writes 0x0 to the CLEAR register; on typical
+		 * write-1-to-clear hardware this clears nothing — confirm
+		 * whether all-ones (or the pending status) was intended
+		 */
+		cam_io_w_mb(0x0, controller->mem_base +
+			controller->irq_register_arr[i].clear_reg_offset);
+	}
+
+	/* Strobe the global clear so the CLEAR writes take effect */
+	if (controller->global_clear_offset)
+		cam_io_w_mb(controller->global_clear_bitmask,
+			controller->mem_base +
+			controller->global_clear_offset);
+
+	/* Mask off every interrupt source */
+	for (i = 0; i < controller->num_registers; i++) {
+		cam_io_w_mb(0x0, controller->mem_base +
+			controller->irq_register_arr[i].mask_reg_offset);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * cam_irq_controller_handle_irq()
+ *
+ * @brief:   Main IRQ handler: under controller->lock, latch and clear
+ *           all status registers, then dispatch top half processing
+ *           per priority level, priority 0 first.
+ *
+ * @irq_num: IRQ line number (unused here)
+ * @priv:    Controller pointer registered with the IRQ line
+ *
+ * @return:  IRQ_HANDLED, or IRQ_NONE if priv is NULL
+ */
+irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv)
+{
+	struct cam_irq_controller *controller = priv;
+	struct cam_irq_register_obj *irq_register;
+	bool need_th_processing[CAM_IRQ_PRIORITY_MAX] = {false};
+	int i;
+	int j;
+
+	if (!controller)
+		return IRQ_NONE;
+
+	CAM_DBG(CAM_IRQ_CTRL, "locking controller %pK name %s lock %pK",
+		controller, controller->name, &controller->lock);
+	spin_lock(&controller->lock);
+	/* Latch each status register, write the value back to its CLEAR
+	 * register, and record which priority levels have a subscribed
+	 * bit set
+	 */
+	for (i = 0; i < controller->num_registers; i++) {
+		irq_register = &controller->irq_register_arr[i];
+		controller->irq_status_arr[i] = cam_io_r_mb(
+			controller->mem_base +
+			controller->irq_register_arr[i].status_reg_offset);
+		cam_io_w_mb(controller->irq_status_arr[i],
+			controller->mem_base +
+			controller->irq_register_arr[i].clear_reg_offset);
+		CAM_DBG(CAM_IRQ_CTRL, "Read irq status%d (0x%x) = 0x%x", i,
+			controller->irq_register_arr[i].status_reg_offset,
+			controller->irq_status_arr[i]);
+		for (j = 0; j < CAM_IRQ_PRIORITY_MAX; j++) {
+			if (irq_register->top_half_enable_mask[j] &
+				controller->irq_status_arr[i])
+				need_th_processing[j] = true;
+			CAM_DBG(CAM_IRQ_CTRL,
+				"i %d j %d need_th_processing = %d",
+				i, j, need_th_processing[j]);
+		}
+	}
+
+	CAM_DBG(CAM_IRQ_CTRL, "Status Registers read Successful");
+
+	/* Commit the clears with the global clear strobe, if present */
+	if (controller->global_clear_offset)
+		cam_io_w_mb(controller->global_clear_bitmask,
+			controller->mem_base + controller->global_clear_offset);
+
+	CAM_DBG(CAM_IRQ_CTRL, "Status Clear done");
+
+	/* Service priority levels in order, 0 (highest) through 4 */
+	for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++) {
+		if (need_th_processing[i]) {
+			CAM_DBG(CAM_IRQ_CTRL, "Invoke TH processing");
+			cam_irq_controller_th_processing(controller,
+				&controller->th_list_head[i]);
+		}
+	}
+	spin_unlock(&controller->lock);
+	CAM_DBG(CAM_IRQ_CTRL, "unlocked controller %pK name %s lock %pK",
+		controller, controller->name, &controller->lock);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
new file mode 100644
index 000000000000..c3c1e7c936ce
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
@@ -0,0 +1,279 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IRQ_CONTROLLER_H_
+#define _CAM_IRQ_CONTROLLER_H_
+
+#include <linux/interrupt.h>
+
+#define CAM_IRQ_BITS_PER_REGISTER 32
+
+/*
+ * enum cam_irq_priority_level:
+ * @Brief: Priority levels for IRQ events.
+ * Priority_0 events will be serviced before
+ *                  Priority_1 if these bits are set in the same
+ *                  Status Read. And so on up to Priority_4.
+ *
+ * Default Priority is Priority_4.
+ */
+enum cam_irq_priority_level {
+ CAM_IRQ_PRIORITY_0,
+ CAM_IRQ_PRIORITY_1,
+ CAM_IRQ_PRIORITY_2,
+ CAM_IRQ_PRIORITY_3,
+ CAM_IRQ_PRIORITY_4,
+ CAM_IRQ_PRIORITY_MAX,
+};
+
+/*
+ * struct cam_irq_register_set:
+ * @Brief: Structure containing offsets of IRQ related
+ * registers belonging to a Set
+ *
+ * @mask_reg_offset: Offset of IRQ MASK register
+ * @clear_reg_offset: Offset of IRQ CLEAR register
+ * @status_reg_offset: Offset of IRQ STATUS register
+ */
+struct cam_irq_register_set {
+ uint32_t mask_reg_offset;
+ uint32_t clear_reg_offset;
+ uint32_t status_reg_offset;
+};
+
+/*
+ * struct cam_irq_controller_reg_info:
+ * @Brief: Structure describing the IRQ registers
+ *
+ * @num_registers: Number of sets(mask/clear/status) of IRQ registers
+ * @irq_reg_set: Array of Register Set Offsets.
+ * Length of array = num_registers
+ * @global_clear_offset: Offset of Global IRQ Clear register. This register
+ * contains the BIT that needs to be set for the CLEAR
+ * to take effect
+ * @global_clear_bitmask: Bitmask needed to be used in Global Clear register
+ * for Clear IRQ cmd to take effect
+ */
+struct cam_irq_controller_reg_info {
+ uint32_t num_registers;
+ struct cam_irq_register_set *irq_reg_set;
+ uint32_t global_clear_offset;
+ uint32_t global_clear_bitmask;
+};
+
+/*
+ * struct cam_irq_th_payload:
+ * @Brief: Event payload structure. This structure will be
+ * passed to the Top Half handler functions.
+ *
+ * @handler_priv: Private Data of handling object set when
+ * subscribing to IRQ event.
+ * @num_registers:   Length of evt_status_arr Array below
+ * @evt_status_arr: Array of Status bitmask read from registers.
+ * Length of array = num_registers
+ * @evt_payload_priv: Private payload pointer which can be set by Top
+ * Half handler for use in Bottom Half.
+ */
+struct cam_irq_th_payload {
+ void *handler_priv;
+ uint32_t num_registers;
+ uint32_t *evt_status_arr;
+ void *evt_payload_priv;
+};
+
+/*
+ * cam_irq_th_payload_init()
+ *
+ * @brief: Initialize the top half payload structure
+ *
+ * @th_payload: Top Half payload structure to Initialize
+ *
+ * @return: Void
+ */
+static inline void cam_irq_th_payload_init(
+ struct cam_irq_th_payload *th_payload) {
+ th_payload->handler_priv = NULL;
+ th_payload->evt_payload_priv = NULL;
+}
+
+typedef int (*CAM_IRQ_HANDLER_TOP_HALF)(uint32_t evt_id,
+ struct cam_irq_th_payload *th_payload);
+
+typedef int (*CAM_IRQ_HANDLER_BOTTOM_HALF)(void *handler_priv,
+ void *evt_payload_priv);
+
+typedef void (*CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC)(void *bottom_half,
+ void *bh_cmd, void *handler_priv, void *evt_payload_priv,
+ CAM_IRQ_HANDLER_BOTTOM_HALF);
+
+typedef int (*CAM_IRQ_GET_TASKLET_PAYLOAD_FUNC)(void *bottom_half,
+ void **bh_cmd);
+
+typedef void (*CAM_IRQ_PUT_TASKLET_PAYLOAD_FUNC)(void *bottom_half,
+ void **bh_cmd);
+
+struct cam_irq_bh_api {
+ CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC bottom_half_enqueue_func;
+ CAM_IRQ_GET_TASKLET_PAYLOAD_FUNC get_bh_payload_func;
+ CAM_IRQ_PUT_TASKLET_PAYLOAD_FUNC put_bh_payload_func;
+};
+
+/*
+ * cam_irq_controller_init()
+ *
+ * @brief: Create and Initialize IRQ Controller.
+ *
+ * @name: Name of IRQ Controller block
+ * @mem_base: Mapped base address of register space to which
+ * register offsets are added to access registers
+ * @register_info: Register Info structure associated with this Controller
+ * @irq_controller: Pointer to IRQ Controller that will be filled if
+ * initialization is successful
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_irq_controller_init(const char *name,
+ void __iomem *mem_base,
+ struct cam_irq_controller_reg_info *register_info,
+ void **irq_controller);
+
+/*
+ * cam_irq_controller_subscribe_irq()
+ *
+ * @brief: Subscribe to certain IRQ events.
+ *
+ * @irq_controller: Pointer to IRQ Controller that controls this event IRQ
+ * @priority: Priority level of these events used if multiple events
+ * are SET in the Status Register
+ * @evt_bit_mask_arr: evt_bit_mask that has the bits set for IRQs to
+ * subscribe for
+ * @handler_priv: Private data that will be passed to the Top/Bottom Half
+ * handler function
+ * @top_half_handler: Top half Handler callback function
+ * @bottom_half_handler: Bottom half Handler callback function
+ * @bottom_half: Pointer to bottom_half implementation on which to
+ * enqueue the event for further handling
+ * @bottom_half_enqueue_func:
+ * Function used to enqueue the bottom_half event
+ *
+ * @return: Positive: Success. Value represents handle which is
+ * to be used to unsubscribe
+ * Negative: Failure
+ */
+int cam_irq_controller_subscribe_irq(void *irq_controller,
+ enum cam_irq_priority_level priority,
+ uint32_t *evt_bit_mask_arr,
+ void *handler_priv,
+ CAM_IRQ_HANDLER_TOP_HALF top_half_handler,
+ CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler,
+ void *bottom_half,
+ struct cam_irq_bh_api *irq_bh_api);
+
+/*
+ * cam_irq_controller_unsubscribe_irq()
+ *
+ * @brief: Unsubscribe to IRQ events previously subscribed to.
+ *
+ * @irq_controller: Pointer to IRQ Controller that controls this event IRQ
+ * @handle: Handle returned on successful subscribe used to
+ * identify the handler object
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_irq_controller_unsubscribe_irq(void *irq_controller,
+ uint32_t handle);
+
+/*
+ * cam_irq_controller_deinit()
+ *
+ * @brief: Deinitialize IRQ Controller.
+ *
+ * @irq_controller: Pointer to IRQ Controller that needs to be
+ * deinitialized
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_irq_controller_deinit(void **irq_controller);
+
+/*
+ * cam_irq_controller_handle_irq()
+ *
+ * @brief: Function that should be registered with the IRQ line.
+ * This is the first function to be called when the IRQ
+ * is fired. It will read the Status register and Clear
+ * the IRQ bits. It will then call the top_half handlers
+ * and enqueue the result to bottom_half.
+ *
+ * @irq_num: Number of IRQ line that was set that lead to this
+ * function being called
+ * @priv: Private data registered with request_irq is passed back
+ * here. This private data should be the irq_controller
+ * structure.
+ *
+ * @return: IRQ_HANDLED/IRQ_NONE
+ */
+irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv);
+
+/*
+ * cam_irq_controller_disable_irq()
+ *
+ * @brief: Disable the interrupts on given controller.
+ * Unsubscribe will disable the IRQ by default, so this is
+ * only needed if between subscribe/unsubscribe there is
+ * need to disable IRQ again
+ *
+ * @irq_controller: Pointer to IRQ Controller that controls the registered
+ * events to it.
+ * @handle: Handle returned on successful subscribe, used to
+ * identify the handler object
+ *
+ * @return: 0: events found and disabled
+ * Negative: events not registered on this controller
+ */
+int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle);
+
+/*
+ * cam_irq_controller_enable_irq()
+ *
+ * @brief: Enable the interrupts on given controller.
+ * Subscribe will enable the IRQ by default, so this is
+ * only needed if between subscribe/unsubscribe there is
+ * need to enable IRQ again
+ *
+ * @irq_controller: Pointer to IRQ Controller that controls the registered
+ * events to it.
+ * @handle: Handle returned on successful subscribe, used to
+ * identify the handler object
+ *
+ * @return: 0: events found and enabled
+ * Negative: events not registered on this controller
+ */
+int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle);
+
+/*
+ * cam_irq_controller_clear_and_mask()
+ *
+ * @brief: This function clears and masks all the irq bits
+ *
+ * @irq_num: Number of IRQ line that was set that lead to this
+ * function being called
+ * @priv: Private data registered with request_irq is passed back
+ * here. This private data should be the irq_controller
+ * structure.
+ *
+ * @return: IRQ_HANDLED/IRQ_NONE
+ */
+irqreturn_t cam_irq_controller_clear_and_mask(int irq_num, void *priv);
+#endif /* _CAM_IRQ_CONTROLLER_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
new file mode 100644
index 000000000000..1a0f1ecab7b6
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_HW_MGR_INTF_H_
+#define _CAM_ISP_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_hw_mgr_intf.h"
+
+/* MAX IFE instance */
+#define CAM_IFE_HW_NUM_MAX 4
+#define CAM_IFE_RDI_NUM_MAX 4
+
+/**
+ * enum cam_isp_hw_event_type - Collection of the ISP hardware events
+ */
+enum cam_isp_hw_event_type {
+ CAM_ISP_HW_EVENT_ERROR,
+ CAM_ISP_HW_EVENT_SOF,
+ CAM_ISP_HW_EVENT_REG_UPDATE,
+ CAM_ISP_HW_EVENT_EPOCH,
+ CAM_ISP_HW_EVENT_EOF,
+ CAM_ISP_HW_EVENT_DONE,
+ CAM_ISP_HW_EVENT_MAX
+};
+
+
+/**
+ * enum cam_isp_hw_err_type - Collection of the ISP error types for
+ * ISP hardware event CAM_ISP_HW_EVENT_ERROR
+ */
+enum cam_isp_hw_err_type {
+ CAM_ISP_HW_ERROR_NONE,
+ CAM_ISP_HW_ERROR_OVERFLOW,
+ CAM_ISP_HW_ERROR_P2I_ERROR,
+ CAM_ISP_HW_ERROR_VIOLATION,
+ CAM_ISP_HW_ERROR_BUSIF_OVERFLOW,
+ CAM_ISP_HW_ERROR_MAX,
+};
+
+/**
+ * enum cam_isp_hw_stop_cmd - Specify the stop command type
+ */
+enum cam_isp_hw_stop_cmd {
+ CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY,
+ CAM_ISP_HW_STOP_IMMEDIATELY,
+ CAM_ISP_HW_STOP_MAX,
+};
+
+/**
+ * struct cam_isp_stop_args - hardware stop arguments
+ *
+ * @hw_stop_cmd: Hardware stop command type information
+ * @stop_only:            Send stop only to hw drivers. No Deinit to be
+ * done.
+ *
+ */
+struct cam_isp_stop_args {
+ enum cam_isp_hw_stop_cmd hw_stop_cmd;
+ bool stop_only;
+};
+
+/**
+ * struct cam_isp_start_args - isp hardware start arguments
+ *
+ * @hw_config:            Hardware configuration commands.
+ * @start_only:           Send start only to hw drivers. No init to
+ * be done.
+ *
+ */
+struct cam_isp_start_args {
+ struct cam_hw_config_args hw_config;
+ bool start_only;
+};
+
+/**
+ * struct cam_isp_bw_config_internal - Internal Bandwidth configuration
+ *
+ * @usage_type: Usage type (Single/Dual)
+ * @num_rdi: Number of RDI votes
+ * @left_pix_vote: Bandwidth vote for left ISP
+ * @right_pix_vote: Bandwidth vote for right ISP
+ * @rdi_vote: RDI bandwidth requirements
+ */
+
+struct cam_isp_bw_config_internal {
+ uint32_t usage_type;
+ uint32_t num_rdi;
+ struct cam_isp_bw_vote left_pix_vote;
+ struct cam_isp_bw_vote right_pix_vote;
+ struct cam_isp_bw_vote rdi_vote[CAM_IFE_RDI_NUM_MAX];
+};
+
+/**
+ * struct cam_isp_prepare_hw_update_data - hw prepare data
+ *
+ * @packet_opcode_type: Packet header opcode in the packet header
+ * this opcode defines, packet is init packet or
+ * update packet
+ * @bw_config: BW config information
+ * @bw_config_valid: Flag indicating whether the bw_config at the index
+ * is valid or not
+ *
+ */
+struct cam_isp_prepare_hw_update_data {
+ uint32_t packet_opcode_type;
+ struct cam_isp_bw_config_internal bw_config[CAM_IFE_HW_NUM_MAX];
+ bool bw_config_valid[CAM_IFE_HW_NUM_MAX];
+};
+
+
+/**
+ * struct cam_isp_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF
+ *
+ * @timestamp: Time stamp for the sof event
+ * @boot_time: Boot time stamp for the sof event
+ *
+ */
+struct cam_isp_hw_sof_event_data {
+ uint64_t timestamp;
+ uint64_t boot_time;
+};
+
+/**
+ * struct cam_isp_hw_reg_update_event_data - Event payload for
+ * CAM_HW_EVENT_REG_UPDATE
+ *
+ * @timestamp: Time stamp for the reg update event
+ *
+ */
+struct cam_isp_hw_reg_update_event_data {
+ uint64_t timestamp;
+};
+
+/**
+ * struct cam_isp_hw_epoch_event_data - Event payload for CAM_HW_EVENT_EPOCH
+ *
+ * @timestamp: Time stamp for the epoch event
+ *
+ */
+struct cam_isp_hw_epoch_event_data {
+ uint64_t timestamp;
+};
+
+/**
+ * struct cam_isp_hw_done_event_data - Event payload for CAM_HW_EVENT_DONE
+ *
+ * @num_handles:           Number of resource handles
+ * @resource_handle: Resource handle array
+ * @timestamp: Timestamp for the buf done event
+ *
+ */
+struct cam_isp_hw_done_event_data {
+ uint32_t num_handles;
+ uint32_t resource_handle[
+ CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+ uint64_t timestamp;
+};
+
+/**
+ * struct cam_isp_hw_eof_event_data - Event payload for CAM_HW_EVENT_EOF
+ *
+ * @timestamp: Timestamp for the eof event
+ *
+ */
+struct cam_isp_hw_eof_event_data {
+ uint64_t timestamp;
+};
+
+/**
+ * struct cam_isp_hw_error_event_data - Event payload for CAM_HW_EVENT_ERROR
+ *
+ * @error_type: Error type for the error event
+ * @timestamp: Timestamp for the error event
+ * @recovery_enabled: Identifies if the context needs to recover & reapply
+ * this request
+ */
+struct cam_isp_hw_error_event_data {
+ uint32_t error_type;
+ uint64_t timestamp;
+ bool recovery_enabled;
+};
+
+/* enum cam_isp_hw_mgr_command - Hardware manager command type */
+enum cam_isp_hw_mgr_command {
+ CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT,
+ CAM_ISP_HW_MGR_CMD_PAUSE_HW,
+ CAM_ISP_HW_MGR_CMD_RESUME_HW,
+ CAM_ISP_HW_MGR_CMD_SOF_DEBUG,
+ CAM_ISP_HW_MGR_CMD_MAX,
+};
+
+/**
+ * struct cam_isp_hw_cmd_args - Payload for hw manager command
+ *
+ * @cmd_type HW command type
+ * @u:                    Union holding command-specific context information
+ */
+struct cam_isp_hw_cmd_args {
+ uint32_t cmd_type;
+ union {
+ uint32_t is_rdi_only_context;
+ uint32_t sof_irq_enable;
+ } u;
+};
+
+
+/**
+ * cam_isp_hw_mgr_init()
+ *
+ * @brief: Initialization function for the ISP hardware manager
+ *
+ * @of_node: Device node input
+ * @hw_mgr: Input/output structure for the ISP hardware manager
+ * initialization
+ * @iommu_hdl: Iommu handle to be returned
+ */
+int cam_isp_hw_mgr_init(struct device_node *of_node,
+ struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl);
+
+#endif /* _CAM_ISP_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/Makefile
new file mode 100644
index 000000000000..4bf4a0e976e1
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += ife_csid_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += vfe_hw/
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
new file mode 100644
index 000000000000..25bdc51d369f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
@@ -0,0 +1,12 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ife_csid_dev.o cam_ife_csid_soc.o cam_ife_csid_core.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ife_csid17x.o cam_ife_csid_lite17x.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c
new file mode 100644
index 000000000000..70223f1427f5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c
@@ -0,0 +1,61 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/module.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid170.h"
+#include "cam_ife_csid_dev.h"
+
+#define CAM_CSID_DRV_NAME "csid_170"
+#define CAM_CSID_VERSION_V170 0x10070000
+
+static struct cam_ife_csid_hw_info cam_ife_csid170_hw_info = {
+ .csid_reg = &cam_ife_csid_170_reg_offset,
+ .hw_dts_version = CAM_CSID_VERSION_V170,
+};
+
+static const struct of_device_id cam_ife_csid170_dt_match[] = {
+ {
+ .compatible = "qcom,csid170",
+ .data = &cam_ife_csid170_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, cam_ife_csid170_dt_match);
+
+static struct platform_driver cam_ife_csid170_driver = {
+ .probe = cam_ife_csid_probe,
+ .remove = cam_ife_csid_remove,
+ .driver = {
+ .name = CAM_CSID_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cam_ife_csid170_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_ife_csid170_init_module(void)
+{
+ return platform_driver_register(&cam_ife_csid170_driver);
+}
+
+static void __exit cam_ife_csid170_exit_module(void)
+{
+ platform_driver_unregister(&cam_ife_csid170_driver);
+}
+
+module_init(cam_ife_csid170_init_module);
+module_exit(cam_ife_csid170_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
new file mode 100644
index 000000000000..85fec0f15dcd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
@@ -0,0 +1,310 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_170_H_
+#define _CAM_IFE_CSID_170_H_
+
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_pxl_reg_offset cam_ife_csid_170_ipp_reg_offset = {
+ .csid_pxl_irq_status_addr = 0x30,
+ .csid_pxl_irq_mask_addr = 0x34,
+ .csid_pxl_irq_clear_addr = 0x38,
+ .csid_pxl_irq_set_addr = 0x3c,
+
+ .csid_pxl_cfg0_addr = 0x200,
+ .csid_pxl_cfg1_addr = 0x204,
+ .csid_pxl_ctrl_addr = 0x208,
+ .csid_pxl_frm_drop_pattern_addr = 0x20c,
+ .csid_pxl_frm_drop_period_addr = 0x210,
+ .csid_pxl_irq_subsample_pattern_addr = 0x214,
+ .csid_pxl_irq_subsample_period_addr = 0x218,
+ .csid_pxl_hcrop_addr = 0x21c,
+ .csid_pxl_vcrop_addr = 0x220,
+ .csid_pxl_pix_drop_pattern_addr = 0x224,
+ .csid_pxl_pix_drop_period_addr = 0x228,
+ .csid_pxl_line_drop_pattern_addr = 0x22c,
+ .csid_pxl_line_drop_period_addr = 0x230,
+ .csid_pxl_rst_strobes_addr = 0x240,
+ .csid_pxl_status_addr = 0x254,
+ .csid_pxl_misr_val_addr = 0x258,
+ .csid_pxl_format_measure_cfg0_addr = 0x270,
+ .csid_pxl_format_measure_cfg1_addr = 0x274,
+ .csid_pxl_format_measure0_addr = 0x278,
+ .csid_pxl_format_measure1_addr = 0x27c,
+ .csid_pxl_format_measure2_addr = 0x280,
+ .csid_pxl_timestamp_curr0_sof_addr = 0x290,
+ .csid_pxl_timestamp_curr1_sof_addr = 0x294,
+ .csid_pxl_timestamp_perv0_sof_addr = 0x298,
+ .csid_pxl_timestamp_perv1_sof_addr = 0x29c,
+ .csid_pxl_timestamp_curr0_eof_addr = 0x2a0,
+ .csid_pxl_timestamp_curr1_eof_addr = 0x2a4,
+ .csid_pxl_timestamp_perv0_eof_addr = 0x2a8,
+ .csid_pxl_timestamp_perv1_eof_addr = 0x2ac,
+ /* configurations */
+ .pix_store_en_shift_val = 7,
+ .early_eof_en_shift_val = 29,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_0_reg_offset = {
+ .csid_rdi_irq_status_addr = 0x40,
+ .csid_rdi_irq_mask_addr = 0x44,
+ .csid_rdi_irq_clear_addr = 0x48,
+ .csid_rdi_irq_set_addr = 0x4c,
+ .csid_rdi_cfg0_addr = 0x300,
+ .csid_rdi_cfg1_addr = 0x304,
+ .csid_rdi_ctrl_addr = 0x308,
+ .csid_rdi_frm_drop_pattern_addr = 0x30c,
+ .csid_rdi_frm_drop_period_addr = 0x310,
+ .csid_rdi_irq_subsample_pattern_addr = 0x314,
+ .csid_rdi_irq_subsample_period_addr = 0x318,
+ .csid_rdi_rpp_hcrop_addr = 0x31c,
+ .csid_rdi_rpp_vcrop_addr = 0x320,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x324,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x328,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x32c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x330,
+ .csid_rdi_rst_strobes_addr = 0x340,
+ .csid_rdi_status_addr = 0x350,
+ .csid_rdi_misr_val0_addr = 0x354,
+ .csid_rdi_misr_val1_addr = 0x358,
+ .csid_rdi_misr_val2_addr = 0x35c,
+ .csid_rdi_misr_val3_addr = 0x360,
+ .csid_rdi_format_measure_cfg0_addr = 0x370,
+ .csid_rdi_format_measure_cfg1_addr = 0x374,
+ .csid_rdi_format_measure0_addr = 0x378,
+ .csid_rdi_format_measure1_addr = 0x37c,
+ .csid_rdi_format_measure2_addr = 0x380,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x390,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x394,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x398,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x39c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x3a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x3a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x3a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x3ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x3e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x3e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_1_reg_offset = {
+ .csid_rdi_irq_status_addr = 0x50,
+ .csid_rdi_irq_mask_addr = 0x54,
+ .csid_rdi_irq_clear_addr = 0x58,
+ .csid_rdi_irq_set_addr = 0x5c,
+ .csid_rdi_cfg0_addr = 0x400,
+ .csid_rdi_cfg1_addr = 0x404,
+ .csid_rdi_ctrl_addr = 0x408,
+ .csid_rdi_frm_drop_pattern_addr = 0x40c,
+ .csid_rdi_frm_drop_period_addr = 0x410,
+ .csid_rdi_irq_subsample_pattern_addr = 0x414,
+ .csid_rdi_irq_subsample_period_addr = 0x418,
+ .csid_rdi_rpp_hcrop_addr = 0x41c,
+ .csid_rdi_rpp_vcrop_addr = 0x420,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x424,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x428,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x42c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x430,
+ .csid_rdi_rst_strobes_addr = 0x440,
+ .csid_rdi_status_addr = 0x450,
+ .csid_rdi_misr_val0_addr = 0x454,
+ .csid_rdi_misr_val1_addr = 0x458,
+ .csid_rdi_misr_val2_addr = 0x45c,
+ .csid_rdi_misr_val3_addr = 0x460,
+ .csid_rdi_format_measure_cfg0_addr = 0x470,
+ .csid_rdi_format_measure_cfg1_addr = 0x474,
+ .csid_rdi_format_measure0_addr = 0x478,
+ .csid_rdi_format_measure1_addr = 0x47c,
+ .csid_rdi_format_measure2_addr = 0x480,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x490,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x494,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x498,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x49c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x4a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x4a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x4a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x4ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x4e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x4e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_2_reg_offset = {
+ .csid_rdi_irq_status_addr = 0x60,
+ .csid_rdi_irq_mask_addr = 0x64,
+ .csid_rdi_irq_clear_addr = 0x68,
+ .csid_rdi_irq_set_addr = 0x6c,
+ .csid_rdi_cfg0_addr = 0x500,
+ .csid_rdi_cfg1_addr = 0x504,
+ .csid_rdi_ctrl_addr = 0x508,
+ .csid_rdi_frm_drop_pattern_addr = 0x50c,
+ .csid_rdi_frm_drop_period_addr = 0x510,
+ .csid_rdi_irq_subsample_pattern_addr = 0x514,
+ .csid_rdi_irq_subsample_period_addr = 0x518,
+ .csid_rdi_rpp_hcrop_addr = 0x51c,
+ .csid_rdi_rpp_vcrop_addr = 0x520,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x524,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x528,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x52c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x530,
+ .csid_rdi_yuv_chroma_conversion_addr = 0x534,
+ .csid_rdi_rst_strobes_addr = 0x540,
+ .csid_rdi_status_addr = 0x550,
+ .csid_rdi_misr_val0_addr = 0x554,
+ .csid_rdi_misr_val1_addr = 0x558,
+ .csid_rdi_misr_val2_addr = 0x55c,
+ .csid_rdi_misr_val3_addr = 0x560,
+ .csid_rdi_format_measure_cfg0_addr = 0x570,
+ .csid_rdi_format_measure_cfg1_addr = 0x574,
+ .csid_rdi_format_measure0_addr = 0x578,
+ .csid_rdi_format_measure1_addr = 0x57c,
+ .csid_rdi_format_measure2_addr = 0x580,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x590,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x594,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x598,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x59c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x5a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x5a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x5a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x5ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x5e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x5e4,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+ cam_ife_csid_170_csi2_reg_offset = {
+ .csid_csi2_rx_irq_status_addr = 0x20,
+ .csid_csi2_rx_irq_mask_addr = 0x24,
+ .csid_csi2_rx_irq_clear_addr = 0x28,
+ .csid_csi2_rx_irq_set_addr = 0x2c,
+
+ /*CSI2 rx control */
+ .csid_csi2_rx_cfg0_addr = 0x100,
+ .csid_csi2_rx_cfg1_addr = 0x104,
+ .csid_csi2_rx_capture_ctrl_addr = 0x108,
+ .csid_csi2_rx_rst_strobes_addr = 0x110,
+ .csid_csi2_rx_de_scramble_cfg0_addr = 0x114,
+ .csid_csi2_rx_de_scramble_cfg1_addr = 0x118,
+ .csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr = 0x120,
+ .csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr = 0x124,
+ .csid_csi2_rx_captured_short_pkt_0_addr = 0x128,
+ .csid_csi2_rx_captured_short_pkt_1_addr = 0x12c,
+ .csid_csi2_rx_captured_long_pkt_0_addr = 0x130,
+ .csid_csi2_rx_captured_long_pkt_1_addr = 0x134,
+ .csid_csi2_rx_captured_long_pkt_ftr_addr = 0x138,
+ .csid_csi2_rx_captured_cphy_pkt_hdr_addr = 0x13c,
+ .csid_csi2_rx_lane0_misr_addr = 0x150,
+ .csid_csi2_rx_lane1_misr_addr = 0x154,
+ .csid_csi2_rx_lane2_misr_addr = 0x158,
+ .csid_csi2_rx_lane3_misr_addr = 0x15c,
+ .csid_csi2_rx_total_pkts_rcvd_addr = 0x160,
+ .csid_csi2_rx_stats_ecc_addr = 0x164,
+ .csid_csi2_rx_total_crc_err_addr = 0x168,
+
+ .csi2_rst_srb_all = 0x3FFF,
+ .csi2_rst_done_shift_val = 27,
+ .csi2_irq_mask_all = 0xFFFFFFF,
+ .csi2_misr_enable_shift_val = 6,
+ .csi2_vc_mode_shift_val = 2,
+ .csi2_capture_long_pkt_en_shift = 0,
+ .csi2_capture_short_pkt_en_shift = 1,
+ .csi2_capture_cphy_pkt_en_shift = 2,
+ .csi2_capture_long_pkt_dt_shift = 4,
+ .csi2_capture_long_pkt_vc_shift = 10,
+ .csi2_capture_short_pkt_vc_shift = 15,
+ .csi2_capture_cphy_pkt_dt_shift = 20,
+ .csi2_capture_cphy_pkt_vc_shift = 26,
+};
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+ cam_ife_csid_170_tpg_reg_offset = {
+ /*CSID TPG control */
+ .csid_tpg_ctrl_addr = 0x600,
+ .csid_tpg_vc_cfg0_addr = 0x604,
+ .csid_tpg_vc_cfg1_addr = 0x608,
+ .csid_tpg_lfsr_seed_addr = 0x60c,
+ .csid_tpg_dt_n_cfg_0_addr = 0x610,
+ .csid_tpg_dt_n_cfg_1_addr = 0x614,
+ .csid_tpg_dt_n_cfg_2_addr = 0x618,
+ .csid_tpg_color_bars_cfg_addr = 0x640,
+ .csid_tpg_color_box_cfg_addr = 0x644,
+ .csid_tpg_common_gen_cfg_addr = 0x648,
+ .csid_tpg_cgen_n_cfg_addr = 0x650,
+ .csid_tpg_cgen_n_x0_addr = 0x654,
+ .csid_tpg_cgen_n_x1_addr = 0x658,
+ .csid_tpg_cgen_n_x2_addr = 0x65c,
+ .csid_tpg_cgen_n_xy_addr = 0x660,
+ .csid_tpg_cgen_n_y1_addr = 0x664,
+ .csid_tpg_cgen_n_y2_addr = 0x668,
+
+ /* configurations */
+ .tpg_dtn_cfg_offset = 0xc,
+ .tpg_cgen_cfg_offset = 0x20,
+ .tpg_cpas_ife_reg_offset = 0x28,
+};
+
+static struct cam_ife_csid_common_reg_offset
+ cam_ife_csid_170_cmn_reg_offset = {
+ .csid_hw_version_addr = 0x0,
+ .csid_cfg0_addr = 0x4,
+ .csid_ctrl_addr = 0x8,
+ .csid_reset_addr = 0xc,
+ .csid_rst_strobes_addr = 0x10,
+
+ .csid_test_bus_ctrl_addr = 0x14,
+ .csid_top_irq_status_addr = 0x70,
+ .csid_top_irq_mask_addr = 0x74,
+ .csid_top_irq_clear_addr = 0x78,
+ .csid_top_irq_set_addr = 0x7c,
+ .csid_irq_cmd_addr = 0x80,
+
+ /*configurations */
+ .major_version = 1,
+ .minor_version = 7,
+ .version_incr = 0,
+ .num_rdis = 3,
+ .num_pix = 1,
+ .num_ppp = 0,
+ .csid_rst_stb = 0x1e,
+ .csid_rst_stb_sw_all = 0x1f,
+ .path_rst_stb_all = 0x7f,
+ .path_rst_done_shift_val = 1,
+ .path_en_shift_val = 31,
+ .dt_id_shift_val = 27,
+ .vc_shift_val = 22,
+ .dt_shift_val = 16,
+ .fmt_shift_val = 12,
+ .plain_fmt_shit_val = 10,
+ .crop_v_en_shift_val = 6,
+ .crop_h_en_shift_val = 5,
+ .crop_shift = 16,
+ .ipp_irq_mask_all = 0x7FFF,
+ .rdi_irq_mask_all = 0x7FFF,
+ .ppp_irq_mask_all = 0x0,
+ .measure_en_hbi_vbi_cnt_mask = 0xC,
+ .format_measure_en_val = 1,
+};
+
+static struct cam_ife_csid_reg_offset cam_ife_csid_170_reg_offset = {
+ .cmn_reg = &cam_ife_csid_170_cmn_reg_offset,
+ .csi2_reg = &cam_ife_csid_170_csi2_reg_offset,
+ .ipp_reg = &cam_ife_csid_170_ipp_reg_offset,
+ .ppp_reg = NULL,
+ .rdi_reg = {
+ &cam_ife_csid_170_rdi_0_reg_offset,
+ &cam_ife_csid_170_rdi_1_reg_offset,
+ &cam_ife_csid_170_rdi_2_reg_offset,
+ NULL,
+ },
+ .tpg_reg = &cam_ife_csid_170_tpg_reg_offset,
+};
+
+#endif /* _CAM_IFE_CSID_170_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c
new file mode 100644
index 000000000000..f894daa846b9
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c
@@ -0,0 +1,61 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/module.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid170.h"
+#include "cam_ife_csid_dev.h"
+
+#define CAM_CSID_DRV_NAME "csid_17x"
+#define CAM_CSID_VERSION_V170 0x10070000
+
+/* HW info handed to the common CSID probe through of_device_id::data. */
+static struct cam_ife_csid_hw_info cam_ife_csid170_hw_info = {
+ .csid_reg = &cam_ife_csid_170_reg_offset,
+ .hw_dts_version = CAM_CSID_VERSION_V170,
+};
+
+/* DT match table: "qcom,csid170" nodes bind to this driver. */
+static const struct of_device_id cam_ife_csid17x_dt_match[] = {
+ {
+ .compatible = "qcom,csid170",
+ .data = &cam_ife_csid170_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, cam_ife_csid17x_dt_match);
+
+/*
+ * Platform driver shell; probe/remove come from the shared CSID core
+ * (cam_ife_csid_dev.h). Binding via sysfs is suppressed intentionally.
+ */
+static struct platform_driver cam_ife_csid17x_driver = {
+ .probe = cam_ife_csid_probe,
+ .remove = cam_ife_csid_remove,
+ .driver = {
+ .name = CAM_CSID_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cam_ife_csid17x_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+/* Register the CSID 17x platform driver on module load. */
+static int __init cam_ife_csid17x_init_module(void)
+{
+ return platform_driver_register(&cam_ife_csid17x_driver);
+}
+
+/* Unregister the CSID 17x platform driver on module unload. */
+static void __exit cam_ife_csid17x_exit_module(void)
+{
+ platform_driver_unregister(&cam_ife_csid17x_driver);
+}
+
+module_init(cam_ife_csid17x_init_module);
+module_exit(cam_ife_csid17x_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID17X driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
new file mode 100644
index 000000000000..f3a15558bb1b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -0,0 +1,3432 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include <uapi/media/cam_defs.h>
+
+#include "cam_ife_csid_core.h"
+#include "cam_isp_hw.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+
+/* Timeout value in msec */
+#define IFE_CSID_TIMEOUT 1000
+
+/* TPG VC/DT values */
+#define CAM_IFE_CSID_TPG_VC_VAL 0xA
+#define CAM_IFE_CSID_TPG_DT_VAL 0x2B
+
+/* Timeout values in usec */
+#define CAM_IFE_CSID_TIMEOUT_SLEEP_US 1000
+#define CAM_IFE_CSID_TIMEOUT_ALL_US 100000
+
+/*
+ * Constant Factors needed to change QTimer ticks to nanoseconds
+ * QTimer Freq = 19.2 MHz
+ * Time(us) = ticks/19.2
+ * Time(ns) = ticks/19.2 * 1000
+ */
+#define CAM_IFE_CSID_QTIMER_MUL_FACTOR 10000
+#define CAM_IFE_CSID_QTIMER_DIV_FACTOR 192
+
+/* Max number of sof irq's triggered in case of SOF freeze */
+#define CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX 12
+
+/* Max CSI Rx irq error count threshold value */
+#define CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT 100
+
+/*
+ * cam_ife_csid_is_ipp_ppp_format_supported() - check whether an input
+ * format can be decoded on the IPP/PPP (pixel) paths.
+ *
+ * Return: 0 if @in_format is one of the supported MIPI RAW / DPCM
+ * formats, -EINVAL otherwise.
+ */
+static int cam_ife_csid_is_ipp_ppp_format_supported(
+ uint32_t in_format)
+{
+ int rc = -EINVAL;
+
+ switch (in_format) {
+ case CAM_FORMAT_MIPI_RAW_6:
+ case CAM_FORMAT_MIPI_RAW_8:
+ case CAM_FORMAT_MIPI_RAW_10:
+ case CAM_FORMAT_MIPI_RAW_12:
+ case CAM_FORMAT_MIPI_RAW_14:
+ case CAM_FORMAT_MIPI_RAW_16:
+ case CAM_FORMAT_MIPI_RAW_20:
+ case CAM_FORMAT_DPCM_10_6_10:
+ case CAM_FORMAT_DPCM_10_8_10:
+ case CAM_FORMAT_DPCM_12_6_12:
+ case CAM_FORMAT_DPCM_12_8_12:
+ case CAM_FORMAT_DPCM_14_8_14:
+ case CAM_FORMAT_DPCM_14_10_14:
+ rc = 0;
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+/*
+ * cam_ife_csid_get_format_rdi() - map an (input, output) format pair to
+ * the RDI path decode/plain format register field values.
+ *
+ * @in_format: CAM_FORMAT_* arriving on the CSI2 RX.
+ * @out_format: CAM_FORMAT_* requested at the RDI output.
+ * @decode_fmt: written with the decode-format field value. 0xf appears
+ * to select pass-through (no decode) - TODO confirm against HW doc.
+ * @plain_fmt: written with the plain-format field value; only set for
+ * decoded outputs, left untouched for pass-through pairs.
+ *
+ * Return: 0 on success, -EINVAL (and an error log) for an unsupported
+ * input/output pairing.
+ */
+static int cam_ife_csid_get_format_rdi(
+ uint32_t in_format, uint32_t out_format,
+ uint32_t *decode_fmt, uint32_t *plain_fmt)
+{
+ int rc = 0;
+
+ switch (in_format) {
+ case CAM_FORMAT_MIPI_RAW_6:
+ switch (out_format) {
+ case CAM_FORMAT_MIPI_RAW_6:
+ *decode_fmt = 0xf;
+ break;
+ case CAM_FORMAT_PLAIN8:
+ *decode_fmt = 0x0;
+ *plain_fmt = 0x0;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ break;
+ case CAM_FORMAT_MIPI_RAW_8:
+ switch (out_format) {
+ case CAM_FORMAT_MIPI_RAW_8:
+ case CAM_FORMAT_PLAIN128:
+ *decode_fmt = 0xf;
+ break;
+ case CAM_FORMAT_PLAIN8:
+ *decode_fmt = 0x1;
+ *plain_fmt = 0x0;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ break;
+ case CAM_FORMAT_MIPI_RAW_10:
+ switch (out_format) {
+ case CAM_FORMAT_MIPI_RAW_10:
+ case CAM_FORMAT_PLAIN128:
+ *decode_fmt = 0xf;
+ break;
+ case CAM_FORMAT_PLAIN16_10:
+ *decode_fmt = 0x2;
+ *plain_fmt = 0x1;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ break;
+ case CAM_FORMAT_MIPI_RAW_12:
+ switch (out_format) {
+ case CAM_FORMAT_MIPI_RAW_12:
+ *decode_fmt = 0xf;
+ break;
+ case CAM_FORMAT_PLAIN16_12:
+ *decode_fmt = 0x3;
+ *plain_fmt = 0x1;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ break;
+ case CAM_FORMAT_MIPI_RAW_14:
+ switch (out_format) {
+ case CAM_FORMAT_MIPI_RAW_14:
+ *decode_fmt = 0xf;
+ break;
+ case CAM_FORMAT_PLAIN16_14:
+ *decode_fmt = 0x4;
+ *plain_fmt = 0x1;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ break;
+ case CAM_FORMAT_MIPI_RAW_16:
+ switch (out_format) {
+ case CAM_FORMAT_MIPI_RAW_16:
+ *decode_fmt = 0xf;
+ break;
+ case CAM_FORMAT_PLAIN16_16:
+ *decode_fmt = 0x5;
+ *plain_fmt = 0x1;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ break;
+ case CAM_FORMAT_MIPI_RAW_20:
+ switch (out_format) {
+ case CAM_FORMAT_MIPI_RAW_20:
+ *decode_fmt = 0xf;
+ break;
+ case CAM_FORMAT_PLAIN32_20:
+ *decode_fmt = 0x6;
+ *plain_fmt = 0x2;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ break;
+ /* DPCM inputs have a single fixed decode/plain mapping */
+ case CAM_FORMAT_DPCM_10_6_10:
+ *decode_fmt = 0x7;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_DPCM_10_8_10:
+ *decode_fmt = 0x8;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_DPCM_12_6_12:
+ *decode_fmt = 0x9;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_DPCM_12_8_12:
+ *decode_fmt = 0xA;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_DPCM_14_8_14:
+ *decode_fmt = 0xB;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_DPCM_14_10_14:
+ *decode_fmt = 0xC;
+ *plain_fmt = 0x1;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ CAM_ERR(CAM_ISP, "Unsupported format pair in %d out %d",
+ in_format, out_format);
+
+ return rc;
+}
+
+/*
+ * cam_ife_csid_get_format_ipp_ppp() - map an input format to the
+ * IPP/PPP path decode/plain format register field values.
+ *
+ * Unlike the RDI variant there is no output-format dimension; each
+ * supported input format has exactly one mapping.
+ *
+ * Return: 0 on success, -EINVAL for an unsupported format.
+ */
+static int cam_ife_csid_get_format_ipp_ppp(
+ uint32_t in_format,
+ uint32_t *decode_fmt, uint32_t *plain_fmt)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_ISP, "input format:%d",
+ in_format);
+
+ switch (in_format) {
+ case CAM_FORMAT_MIPI_RAW_6:
+ *decode_fmt = 0;
+ *plain_fmt = 0;
+ break;
+ case CAM_FORMAT_MIPI_RAW_8:
+ *decode_fmt = 0x1;
+ *plain_fmt = 0;
+ break;
+ case CAM_FORMAT_MIPI_RAW_10:
+ *decode_fmt = 0x2;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_MIPI_RAW_12:
+ *decode_fmt = 0x3;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_MIPI_RAW_14:
+ *decode_fmt = 0x4;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_MIPI_RAW_16:
+ *decode_fmt = 0x5;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_MIPI_RAW_20:
+ *decode_fmt = 0x6;
+ *plain_fmt = 0x2;
+ break;
+ case CAM_FORMAT_DPCM_10_6_10:
+ *decode_fmt = 0x7;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_DPCM_10_8_10:
+ *decode_fmt = 0x8;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_DPCM_12_6_12:
+ *decode_fmt = 0x9;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_DPCM_12_8_12:
+ *decode_fmt = 0xA;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_DPCM_14_8_14:
+ *decode_fmt = 0xB;
+ *plain_fmt = 0x1;
+ break;
+ case CAM_FORMAT_DPCM_14_10_14:
+ *decode_fmt = 0xC;
+ *plain_fmt = 0x1;
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Unsupported format %d",
+ in_format);
+ rc = -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "decode_fmt:%d plain_fmt:%d",
+ *decode_fmt, *plain_fmt);
+
+ return rc;
+}
+
+/*
+ * cam_ife_csid_cid_get() - find or allocate a CID resource for a vc/dt.
+ *
+ * If a CID is already reserved for the same vc/dt pair, its use count
+ * is bumped and that resource is returned (shared CID). Otherwise the
+ * first AVAILABLE CID slot is reserved with cnt = 1.
+ *
+ * Return: 0 with *@res set on success; -EINVAL when every CID slot is
+ * in use with a different vc/dt.
+ */
+static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node **res, int32_t vc, uint32_t dt)
+{
+ struct cam_ife_csid_cid_data *cid_data;
+ uint32_t i = 0;
+
+ *res = NULL;
+
+ /* Return already reserved CID if the VC/DT matches */
+ for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+ if (csid_hw->cid_res[i].res_state >=
+ CAM_ISP_RESOURCE_STATE_RESERVED) {
+ cid_data = (struct cam_ife_csid_cid_data *)
+ csid_hw->cid_res[i].res_priv;
+ if (cid_data->vc == vc && cid_data->dt == dt) {
+ cid_data->cnt++;
+ *res = &csid_hw->cid_res[i];
+ return 0;
+ }
+ }
+ }
+
+ /* No match: grab the first free CID slot */
+ for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+ if (csid_hw->cid_res[i].res_state ==
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ cid_data = (struct cam_ife_csid_cid_data *)
+ csid_hw->cid_res[i].res_priv;
+ cid_data->vc = vc;
+ cid_data->dt = dt;
+ cid_data->cnt = 1;
+ csid_hw->cid_res[i].res_state =
+ CAM_ISP_RESOURCE_STATE_RESERVED;
+ *res = &csid_hw->cid_res[i];
+ CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->cid_res[i].res_id);
+ return 0;
+ }
+ }
+
+ CAM_ERR(CAM_ISP, "CSID:%d Free cid is not available",
+ csid_hw->hw_intf->hw_idx);
+
+ return -EINVAL;
+}
+
+
+/*
+ * cam_ife_csid_global_reset() - full HW + SW reset of the CSID block.
+ *
+ * Sequence: mask all path/RX interrupts, clear any pending interrupts,
+ * program defaults, assert the SW-all reset strobe, then poll the top
+ * IRQ status for the reset-done bit. Requires the HW to be powered up.
+ *
+ * Return: 0 on success, -ETIMEDOUT if reset-done never asserts,
+ * -EINVAL if the HW is not in POWER_UP state.
+ */
+static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
+{
+ struct cam_hw_soc_info *soc_info;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ int rc = 0;
+ uint32_t val = 0, i;
+ uint32_t status;
+
+ soc_info = &csid_hw->hw_info->soc_info;
+ csid_reg = csid_hw->csid_info->csid_reg;
+
+ if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid HW State:%d",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->hw_info->hw_state);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "CSID:%d Csid reset",
+ csid_hw->hw_intf->hw_idx);
+
+ /* Mask all interrupts */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+ if (csid_reg->cmn_reg->num_pix)
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+
+ if (csid_reg->cmn_reg->num_ppp)
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
+
+ for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+
+ /* clear all interrupts */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+ cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+ if (csid_reg->cmn_reg->num_pix)
+ cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
+
+ if (csid_reg->cmn_reg->num_ppp)
+ cam_io_w_mb(csid_reg->cmn_reg->ppp_irq_mask_all,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_irq_clear_addr);
+
+ for (i = 0 ; i < csid_reg->cmn_reg->num_rdis; i++)
+ cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+ /* latch the irq clears */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+ /* NOTE(review): magic 0x80 written to rx_cfg1 - meaning undocumented here */
+ cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base +
+ csid_hw->csid_info->csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+ /* enable the IPP and RDI format measure */
+ if (csid_reg->cmn_reg->num_pix)
+ cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_cfg0_addr);
+
+ if (csid_reg->cmn_reg->num_ppp)
+ cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_cfg0_addr);
+
+ for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+ cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[i]->csid_rdi_cfg0_addr);
+
+ /* perform the top CSID HW and SW registers reset */
+ cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+ /* poll bit 0 of the top irq status for reset-done */
+ rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_top_irq_status_addr,
+ status, (status & 0x1) == 0x1,
+ CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
+ rc = -ETIMEDOUT;
+ }
+
+ /* NOTE(review): message labels the leftover mask value as "rc" */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+ if (val != 0)
+ CAM_ERR(CAM_ISP, "CSID:%d IRQ value after reset rc = %d",
+ csid_hw->hw_intf->hw_idx, val);
+ csid_hw->error_irq_count = 0;
+
+ return rc;
+}
+
+/*
+ * cam_ife_csid_path_reset() - reset a single IPP/PPP/RDI path and wait
+ * for its reset-done interrupt.
+ *
+ * Enables the path's RST_DONE interrupt, briefly enables the TPG around
+ * the reset strobe, and blocks on a completion signalled from the IRQ
+ * handler.
+ *
+ * Return: 0 on success, -EINVAL for an unsupported resource id,
+ * -ETIMEDOUT if reset-done is not signalled within IFE_CSID_TIMEOUT ms.
+ */
+static int cam_ife_csid_path_reset(struct cam_ife_csid_hw *csid_hw,
+ struct cam_csid_reset_cfg_args *reset)
+{
+ int rc = 0;
+ struct cam_hw_soc_info *soc_info;
+ struct cam_isp_resource_node *res;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ uint32_t reset_strb_addr, reset_strb_val, val, id;
+ struct completion *complete;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+ res = reset->node_res;
+
+ if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid hw state :%d",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->hw_info->hw_state);
+ return -EINVAL;
+ }
+
+ if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ CAM_DBG(CAM_ISP, "CSID:%d resource:%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
+
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+ if (!csid_reg->ipp_reg) {
+ CAM_ERR(CAM_ISP, "CSID:%d IPP not supported :%d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ reset_strb_addr = csid_reg->ipp_reg->csid_pxl_rst_strobes_addr;
+ complete = &csid_hw->csid_ipp_complete;
+
+ /* Enable path reset done interrupt */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+ val |= CSID_PATH_INFO_RST_DONE;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+
+ } else if (res->res_id == CAM_IFE_PIX_PATH_RES_PPP) {
+ if (!csid_reg->ppp_reg) {
+ CAM_ERR(CAM_ISP, "CSID:%d PPP not supported :%d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ reset_strb_addr = csid_reg->ppp_reg->csid_pxl_rst_strobes_addr;
+ complete = &csid_hw->csid_ppp_complete;
+
+ /* Enable path reset done interrupt */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
+ val |= CSID_PATH_INFO_RST_DONE;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
+ } else {
+ /* remaining valid ids are the RDI paths */
+ id = res->res_id;
+ if (!csid_reg->rdi_reg[id]) {
+ CAM_ERR(CAM_ISP, "CSID:%d RDI res not supported :%d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ reset_strb_addr =
+ csid_reg->rdi_reg[id]->csid_rdi_rst_strobes_addr;
+ complete =
+ &csid_hw->csid_rdin_complete[id];
+
+ /* Enable path reset done interrupt */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+ val |= CSID_PATH_INFO_RST_DONE;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+ }
+
+ init_completion(complete);
+ reset_strb_val = csid_reg->cmn_reg->path_rst_stb_all;
+
+ /* Enable the Test gen before reset */
+ /* NOTE(review): tpg_reg dereferenced unconditionally - assumes the
+ * variant provides a TPG block (true for v170); verify for others.
+ */
+ cam_io_w_mb(1, csid_hw->hw_info->soc_info.reg_map[0].mem_base +
+ csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+ /* Reset the corresponding ife csid path */
+ cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base +
+ reset_strb_addr);
+
+ /* wait_for_completion_timeout: 0 = timeout, >0 = jiffies left */
+ rc = wait_for_completion_timeout(complete,
+ msecs_to_jiffies(IFE_CSID_TIMEOUT));
+ if (rc <= 0) {
+ CAM_ERR(CAM_ISP, "CSID:%d Res id %d fail rc = %d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_id, rc);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ }
+
+ /* Disable Test Gen after reset*/
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+end:
+ return rc;
+
+}
+
+/*
+ * cam_ife_csid_cid_reserve() - validate an in_port request and reserve
+ * a CID for it.
+ *
+ * Validates res_type, lane configuration, vc/dt range and (for TPG
+ * input) the decode format; checks compatibility with an existing
+ * reservation; then allocates/shares a CID via cam_ife_csid_cid_get()
+ * and, on first reservation, latches the CSI2 RX / TPG configuration.
+ *
+ * Return: 0 on success, -EINVAL on any validation or allocation
+ * failure (csi2_reserve_cnt is only incremented on success).
+ */
+static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
+ struct cam_csid_hw_reserve_resource_args *cid_reserv)
+{
+ int rc = 0;
+ struct cam_ife_csid_cid_data *cid_data;
+ uint32_t camera_hw_version;
+
+ CAM_DBG(CAM_ISP,
+ "CSID:%d res_sel:0x%x Lane type:%d lane_num:%d dt:%d vc:%d",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->in_port->res_type,
+ cid_reserv->in_port->lane_type,
+ cid_reserv->in_port->lane_num,
+ cid_reserv->in_port->dt,
+ cid_reserv->in_port->vc);
+
+ if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid phy sel %d",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->in_port->res_type);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
+ cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid lane type %d",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->in_port->lane_type);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* DPHY supports at most 4 data lanes */
+ if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_DPHY &&
+ cid_reserv->in_port->lane_num > 4) &&
+ cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid lane num %d",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->in_port->lane_num);
+ rc = -EINVAL;
+ goto end;
+ }
+ /* CPHY supports at most 3 trios */
+ if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
+ cid_reserv->in_port->lane_num > 3) &&
+ cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+ CAM_ERR(CAM_ISP, " CSID:%d Invalid lane type %d & num %d",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->in_port->lane_type,
+ cid_reserv->in_port->lane_num);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* CSID CSI2 v2.0 supports 31 vc */
+ if (cid_reserv->in_port->dt > 0x3f ||
+ cid_reserv->in_port->vc > 0x1f) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->in_port->vc, cid_reserv->in_port->dt);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /*
+ * TPG can only generate RAW8..RAW16. Bug fix: the original condition
+ * used '&&' ("< RAW_8 && > RAW_16"), which can never be true, so the
+ * check was dead code; '||' rejects formats outside the range.
+ */
+ if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG && (
+ (cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 ||
+ cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16))) {
+ CAM_ERR(CAM_ISP, " CSID:%d Invalid tpg decode fmt %d",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->in_port->format);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
+ CAM_ERR(CAM_ISP,
+ "CSID%d reserve cnt reached max",
+ csid_hw->hw_intf->hw_idx);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Failed to get HW version rc:%d", rc);
+ goto end;
+ }
+ CAM_DBG(CAM_ISP, "HW version: %d", camera_hw_version);
+
+ switch (camera_hw_version) {
+ case CAM_CPAS_TITAN_NONE:
+ case CAM_CPAS_TITAN_MAX:
+ CAM_ERR(CAM_ISP, "Invalid HW version: %d", camera_hw_version);
+ break;
+ case CAM_CPAS_TITAN_170_V100:
+ case CAM_CPAS_TITAN_170_V110:
+ case CAM_CPAS_TITAN_170_V120:
+ /* On Titan 170, PHY_3 is only routable to CSID 2 */
+ if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_PHY_3 &&
+ csid_hw->hw_intf->hw_idx != 2) {
+ rc = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ break;
+ }
+ CAM_DBG(CAM_ISP, "Reserve_cnt %u", csid_hw->csi2_reserve_cnt);
+
+ if (csid_hw->csi2_reserve_cnt) {
+ /* current configure res type should match requested res type */
+ if (csid_hw->res_type != cid_reserv->in_port->res_type) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* a shared reservation must use identical RX / TPG config */
+ if (cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+ if (csid_hw->csi2_rx_cfg.lane_cfg !=
+ cid_reserv->in_port->lane_cfg ||
+ csid_hw->csi2_rx_cfg.lane_type !=
+ cid_reserv->in_port->lane_type ||
+ csid_hw->csi2_rx_cfg.lane_num !=
+ cid_reserv->in_port->lane_num) {
+ rc = -EINVAL;
+ goto end;
+ }
+ } else {
+ if (csid_hw->tpg_cfg.in_format !=
+ cid_reserv->in_port->format ||
+ csid_hw->tpg_cfg.width !=
+ cid_reserv->in_port->left_width ||
+ csid_hw->tpg_cfg.height !=
+ cid_reserv->in_port->height ||
+ csid_hw->tpg_cfg.test_pattern !=
+ cid_reserv->in_port->test_pattern) {
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+ }
+
+ /* the requested path resource must still be available */
+ switch (cid_reserv->res_id) {
+ case CAM_IFE_PIX_PATH_RES_IPP:
+ if (csid_hw->ipp_res.res_state !=
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_DBG(CAM_ISP,
+ "CSID:%d IPP resource not available",
+ csid_hw->hw_intf->hw_idx);
+ rc = -EINVAL;
+ goto end;
+ }
+ break;
+ case CAM_IFE_PIX_PATH_RES_PPP:
+ if (csid_hw->ppp_res.res_state !=
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d PPP resource not available state %d",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->ppp_res.res_state);
+ rc = -EINVAL;
+ goto end;
+ }
+ break;
+ case CAM_IFE_PIX_PATH_RES_RDI_0:
+ case CAM_IFE_PIX_PATH_RES_RDI_1:
+ case CAM_IFE_PIX_PATH_RES_RDI_2:
+ case CAM_IFE_PIX_PATH_RES_RDI_3:
+ if (csid_hw->rdi_res[cid_reserv->res_id].res_state !=
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d RDI:%d resource not available",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->res_id);
+ rc = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID%d: Invalid csid path",
+ csid_hw->hw_intf->hw_idx);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = cam_ife_csid_cid_get(csid_hw,
+ &cid_reserv->node_res,
+ cid_reserv->in_port->vc,
+ cid_reserv->in_port->dt);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CSID:%d CID Reserve failed res_type %d",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->in_port->res_type);
+ goto end;
+ }
+ cid_data = (struct cam_ife_csid_cid_data *)
+ cid_reserv->node_res->res_priv;
+
+ /* first reservation latches the RX / TPG configuration */
+ if (!csid_hw->csi2_reserve_cnt) {
+ csid_hw->res_type = cid_reserv->in_port->res_type;
+
+ csid_hw->csi2_rx_cfg.lane_cfg =
+ cid_reserv->in_port->lane_cfg;
+ csid_hw->csi2_rx_cfg.lane_type =
+ cid_reserv->in_port->lane_type;
+ csid_hw->csi2_rx_cfg.lane_num =
+ cid_reserv->in_port->lane_num;
+
+ if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+ csid_hw->csi2_rx_cfg.phy_sel = 0;
+ if (cid_reserv->in_port->format >
+ CAM_FORMAT_MIPI_RAW_16) {
+ CAM_ERR(CAM_ISP, " Wrong TPG format");
+ rc = -EINVAL;
+ goto end;
+ }
+ csid_hw->tpg_cfg.in_format =
+ cid_reserv->in_port->format;
+ csid_hw->tpg_cfg.usage_type =
+ cid_reserv->in_port->usage_type;
+ /* dual IFE uses the full (right_stop) width */
+ if (cid_reserv->in_port->usage_type)
+ csid_hw->tpg_cfg.width =
+ (cid_reserv->in_port->right_stop + 1);
+ else
+ csid_hw->tpg_cfg.width =
+ cid_reserv->in_port->left_width;
+
+ csid_hw->tpg_cfg.height = cid_reserv->in_port->height;
+ csid_hw->tpg_cfg.test_pattern =
+ cid_reserv->in_port->test_pattern;
+
+ CAM_DBG(CAM_ISP, "CSID:%d TPG width:%d height:%d",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->tpg_cfg.width,
+ csid_hw->tpg_cfg.height);
+
+ cid_data->tpg_set = 1;
+ } else {
+ /* res_type presumably enumerates PHYs from 1; phy_sel
+ * is zero-based - confirm against CAM_ISP_IFE_IN_RES_*
+ */
+ csid_hw->csi2_rx_cfg.phy_sel =
+ (cid_reserv->in_port->res_type & 0xFF) - 1;
+ }
+ }
+
+ csid_hw->csi2_reserve_cnt++;
+ CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired",
+ csid_hw->hw_intf->hw_idx,
+ cid_reserv->node_res->res_id);
+
+end:
+ return rc;
+}
+
+
+/*
+ * cam_ife_csid_path_reserve() - reserve an IPP/PPP/RDI path resource
+ * and fill its per-path configuration.
+ *
+ * Validates vc/dt/sync-mode, checks the requested path is AVAILABLE
+ * (and for IPP/PPP that the format is decodable), marks it RESERVED,
+ * then records format, crop window, vc/dt and master/slave geometry in
+ * the path's private data. reserve->node_res is set on success.
+ *
+ * Return: 0 on success, -EINVAL on any validation failure.
+ */
+static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
+ struct cam_csid_hw_reserve_resource_args *reserve)
+{
+ int rc = 0;
+ struct cam_ife_csid_path_cfg *path_data;
+ struct cam_isp_resource_node *res;
+
+ /* CSID CSI2 v2.0 supports 31 vc */
+ if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
+ (reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d mode:%d",
+ csid_hw->hw_intf->hw_idx,
+ reserve->in_port->vc, reserve->in_port->dt,
+ reserve->sync_mode);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ switch (reserve->res_id) {
+ case CAM_IFE_PIX_PATH_RES_IPP:
+ if (csid_hw->ipp_res.res_state !=
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d IPP resource not available %d",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->ipp_res.res_state);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (cam_ife_csid_is_ipp_ppp_format_supported(
+ reserve->in_port->format)) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d res id:%d un support format %d",
+ csid_hw->hw_intf->hw_idx, reserve->res_id,
+ reserve->in_port->format);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* assign the IPP resource */
+ res = &csid_hw->ipp_res;
+ CAM_DBG(CAM_ISP,
+ "CSID:%d IPP resource:%d acquired successfully",
+ csid_hw->hw_intf->hw_idx, res->res_id);
+
+ break;
+ case CAM_IFE_PIX_PATH_RES_PPP:
+ if (csid_hw->ppp_res.res_state !=
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d PPP resource not available %d",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->ppp_res.res_state);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (cam_ife_csid_is_ipp_ppp_format_supported(
+ reserve->in_port->format)) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d res id:%d unsupported format %d",
+ csid_hw->hw_intf->hw_idx, reserve->res_id,
+ reserve->in_port->format);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* assign the PPP resource */
+ res = &csid_hw->ppp_res;
+ CAM_DBG(CAM_ISP,
+ "CSID:%d PPP resource:%d acquired successfully",
+ csid_hw->hw_intf->hw_idx, res->res_id);
+
+ break;
+ case CAM_IFE_PIX_PATH_RES_RDI_0:
+ case CAM_IFE_PIX_PATH_RES_RDI_1:
+ case CAM_IFE_PIX_PATH_RES_RDI_2:
+ case CAM_IFE_PIX_PATH_RES_RDI_3:
+ if (csid_hw->rdi_res[reserve->res_id].res_state !=
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d RDI:%d resource not available %d",
+ csid_hw->hw_intf->hw_idx,
+ reserve->res_id,
+ csid_hw->rdi_res[reserve->res_id].res_state);
+ rc = -EINVAL;
+ goto end;
+ } else {
+ res = &csid_hw->rdi_res[reserve->res_id];
+ CAM_DBG(CAM_ISP,
+ "CSID:%d RDI resource:%d acquire success",
+ csid_hw->hw_intf->hw_idx,
+ res->res_id);
+ }
+
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res id:%d",
+ csid_hw->hw_intf->hw_idx, reserve->res_id);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ path_data = (struct cam_ife_csid_path_cfg *)res->res_priv;
+
+ path_data->cid = reserve->cid;
+ path_data->in_format = reserve->in_port->format;
+ path_data->out_format = reserve->out_port->format;
+ path_data->sync_mode = reserve->sync_mode;
+ path_data->height = reserve->in_port->height;
+ path_data->start_line = reserve->in_port->line_start;
+ path_data->end_line = reserve->in_port->line_stop;
+
+ /* Enable RDI crop for single ife use case only */
+ switch (reserve->res_id) {
+ case CAM_IFE_PIX_PATH_RES_RDI_0:
+ case CAM_IFE_PIX_PATH_RES_RDI_1:
+ case CAM_IFE_PIX_PATH_RES_RDI_2:
+ case CAM_IFE_PIX_PATH_RES_RDI_3:
+ if (reserve->in_port->usage_type)
+ path_data->crop_enable = false;
+ else
+ path_data->crop_enable = true;
+
+ break;
+ case CAM_IFE_PIX_PATH_RES_IPP:
+ path_data->crop_enable = true;
+ break;
+ case CAM_IFE_PIX_PATH_RES_PPP:
+ path_data->crop_enable = false;
+ break;
+ default:
+ rc = -EINVAL;
+ goto end;
+ }
+
+ CAM_DBG(CAM_ISP,
+ "Res id: %d height:%d line_start %d line_stop %d crop_en %d",
+ reserve->res_id, reserve->in_port->height,
+ reserve->in_port->line_start, reserve->in_port->line_stop,
+ path_data->crop_enable);
+
+ /* TPG input uses fixed vc/dt values instead of the sensor's */
+ if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+ path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
+ path_data->vc = CAM_IFE_CSID_TPG_VC_VAL;
+ } else {
+ path_data->dt = reserve->in_port->dt;
+ path_data->vc = reserve->in_port->vc;
+ }
+
+ /* master uses the left half geometry, slave the right half */
+ if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+ path_data->start_pixel = reserve->in_port->left_start;
+ path_data->end_pixel = reserve->in_port->left_stop;
+ path_data->width = reserve->in_port->left_width;
+ CAM_DBG(CAM_ISP, "CSID:%d master:startpixel 0x%x endpixel:0x%x",
+ csid_hw->hw_intf->hw_idx, path_data->start_pixel,
+ path_data->end_pixel);
+ CAM_DBG(CAM_ISP, "CSID:%d master:line start:0x%x line end:0x%x",
+ csid_hw->hw_intf->hw_idx, path_data->start_line,
+ path_data->end_line);
+ } else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
+ path_data->master_idx = reserve->master_idx;
+ CAM_DBG(CAM_ISP, "CSID:%d master_idx=%d",
+ csid_hw->hw_intf->hw_idx, path_data->master_idx);
+ path_data->start_pixel = reserve->in_port->right_start;
+ path_data->end_pixel = reserve->in_port->right_stop;
+ path_data->width = reserve->in_port->right_width;
+ CAM_DBG(CAM_ISP, "CSID:%d slave:start:0x%x end:0x%x width 0x%x",
+ csid_hw->hw_intf->hw_idx, path_data->start_pixel,
+ path_data->end_pixel, path_data->width);
+ CAM_DBG(CAM_ISP, "CSID:%d slave:line start:0x%x line end:0x%x",
+ csid_hw->hw_intf->hw_idx, path_data->start_line,
+ path_data->end_line);
+ } else {
+ path_data->width = reserve->in_port->left_width;
+ path_data->start_pixel = reserve->in_port->left_start;
+ path_data->end_pixel = reserve->in_port->left_stop;
+ CAM_DBG(CAM_ISP, "Res id: %d left width %d start: %d stop:%d",
+ reserve->res_id, reserve->in_port->left_width,
+ reserve->in_port->left_start,
+ reserve->in_port->left_stop);
+ }
+
+ CAM_DBG(CAM_ISP, "Res %d width %d height %d", reserve->res_id,
+ path_data->width, path_data->height);
+ reserve->node_res = res;
+
+end:
+ return rc;
+}
+
+/*
+ * cam_ife_csid_enable_hw() - power up and initialise the CSID HW.
+ *
+ * Reference counted via hw_info->open_count: only the first caller
+ * enables SOC resources and performs the global reset; subsequent
+ * callers just bump the count.
+ *
+ * Return: 0 on success; on failure SOC resources are released and
+ * open_count is rolled back.
+ */
+static int cam_ife_csid_enable_hw(struct cam_ife_csid_hw *csid_hw)
+{
+ int rc = 0;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+ uint32_t i, val, clk_lvl;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ /* overflow check before increment */
+ if (csid_hw->hw_info->open_count == UINT_MAX) {
+ CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+ csid_hw->hw_intf->hw_idx);
+ return -EINVAL;
+ }
+
+ /* Increment ref Count */
+ csid_hw->hw_info->open_count++;
+ if (csid_hw->hw_info->open_count > 1) {
+ CAM_DBG(CAM_ISP, "CSID hw has already been enabled");
+ return rc;
+ }
+
+ CAM_DBG(CAM_ISP, "CSID:%d init CSID HW",
+ csid_hw->hw_intf->hw_idx);
+
+ clk_lvl = cam_ife_csid_get_vote_level(soc_info, csid_hw->clk_rate);
+ CAM_DBG(CAM_ISP, "CSID clock lvl %u", clk_lvl);
+
+ rc = cam_ife_csid_enable_soc_resources(soc_info, clk_lvl);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CSID:%d Enable SOC failed",
+ csid_hw->hw_intf->hw_idx);
+ goto err;
+ }
+
+ csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
+ /* Reset CSID top */
+ rc = cam_ife_csid_global_reset(csid_hw);
+ if (rc)
+ goto disable_soc;
+
+ /* clear all interrupts */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+ cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+ if (csid_reg->cmn_reg->num_pix)
+ cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
+
+ if (csid_reg->cmn_reg->num_ppp)
+ cam_io_w_mb(csid_reg->cmn_reg->ppp_irq_mask_all,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_irq_clear_addr);
+
+ for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+ cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+ /* latch the irq clears */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_hw_version_addr);
+ CAM_DBG(CAM_ISP, "CSID:%d CSID HW version: 0x%x",
+ csid_hw->hw_intf->hw_idx, val);
+
+ return 0;
+
+disable_soc:
+ cam_ife_csid_disable_soc_resources(soc_info);
+ csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+err:
+ csid_hw->hw_info->open_count--;
+ return rc;
+}
+
+/*
+ * cam_ife_csid_disable_hw() - balance cam_ife_csid_enable_hw().
+ *
+ * Decrements open_count; only the final caller resets the HW, masks
+ * the top IRQ and releases SOC resources. An unbalanced call is
+ * logged and returns -EINVAL.
+ *
+ * Return: 0 on success, -EINVAL on unbalanced call, or the SOC
+ * disable error code.
+ */
+static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
+{
+ int rc = -EINVAL;
+ struct cam_hw_soc_info *soc_info;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+
+ /* Check for refcount */
+ if (!csid_hw->hw_info->open_count) {
+ CAM_WARN(CAM_ISP, "Unbalanced disable_hw");
+ return rc;
+ }
+
+ /* Decrement ref Count */
+ csid_hw->hw_info->open_count--;
+
+ if (csid_hw->hw_info->open_count) {
+ rc = 0;
+ return rc;
+ }
+
+ soc_info = &csid_hw->hw_info->soc_info;
+ csid_reg = csid_hw->csid_info->csid_reg;
+
+ CAM_DBG(CAM_ISP, "%s:Calling Global Reset\n", __func__);
+ cam_ife_csid_global_reset(csid_hw);
+ CAM_DBG(CAM_ISP, "%s:Global Reset Done\n", __func__);
+
+ CAM_DBG(CAM_ISP, "CSID:%d De-init CSID HW",
+ csid_hw->hw_intf->hw_idx);
+
+ /*disable the top IRQ interrupt */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+ rc = cam_ife_csid_disable_soc_resources(soc_info);
+ if (rc)
+ CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",
+ csid_hw->hw_intf->hw_idx);
+
+ csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+ csid_hw->error_irq_count = 0;
+
+ return rc;
+}
+
+
+/*
+ * cam_ife_csid_tpg_start() - refcounted start of the CSID test
+ * pattern generator (TPG).
+ *
+ * Only the first caller (tpg_start_cnt transitions 0 -> 1) programs
+ * the HW: dumps a debug snapshot of the TPG/IPP/RX register banks,
+ * optionally force-enables the IFE clock for dual-ISP usage, then
+ * writes the TPG control register to kick the generator.
+ *
+ * Return: 0 on success, or the error from enabling the IFE force
+ * clock.
+ */
+static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ int rc = 0;
+ uint32_t val = 0;
+ struct cam_hw_soc_info *soc_info;
+ const struct cam_ife_csid_reg_offset *csid_reg = NULL;
+
+ csid_hw->tpg_start_cnt++;
+ if (csid_hw->tpg_start_cnt == 1) {
+ /*Enable the TPG */
+ CAM_DBG(CAM_ISP, "CSID:%d start CSID TPG",
+ csid_hw->hw_intf->hw_idx);
+
+ soc_info = &csid_hw->hw_info->soc_info;
+ /*
+ * Debug-only register dump. NOTE(review): the inner
+ * 'val' intentionally shadows the outer one, and the
+ * 0x600/0x200/0x100 bases appear to be the TPG/IPP/RX
+ * register banks respectively — TODO confirm against
+ * the HW register spec.
+ */
+ {
+ uint32_t val;
+ uint32_t i;
+ uint32_t base = 0x600;
+
+ CAM_DBG(CAM_ISP, "================ TPG ============");
+ for (i = 0; i < 16; i++) {
+ val = cam_io_r_mb(
+ soc_info->reg_map[0].mem_base +
+ base + i * 4);
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
+ (base + i*4), val);
+ }
+
+ CAM_DBG(CAM_ISP, "================ IPP =============");
+ base = 0x200;
+ for (i = 0; i < 10; i++) {
+ val = cam_io_r_mb(
+ soc_info->reg_map[0].mem_base +
+ base + i * 4);
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
+ (base + i*4), val);
+ }
+
+ CAM_DBG(CAM_ISP, "================ RX =============");
+ base = 0x100;
+ for (i = 0; i < 5; i++) {
+ val = cam_io_r_mb(
+ soc_info->reg_map[0].mem_base +
+ base + i * 4);
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
+ (base + i*4), val);
+ }
+ }
+
+ /* Enable the IFE force clock on for dual isp case */
+ csid_reg = csid_hw->csid_info->csid_reg;
+ if (csid_hw->tpg_cfg.usage_type) {
+ rc = cam_ife_csid_enable_ife_force_clock_on(soc_info,
+ csid_reg->tpg_reg->tpg_cpas_ife_reg_offset);
+ if (rc)
+ return rc;
+ }
+
+ CAM_DBG(CAM_ISP, "============ TPG control ============");
+ /*
+ * TPG ctrl word: field meanings are encoded as magic
+ * shifts here — presumably phase/pattern config plus
+ * (lane_num - 1) at bits [5:4] and enable bits in [2:0];
+ * TODO confirm against the TPG register spec.
+ */
+ val = (4 << 20);
+ val |= (0x80 << 8);
+ val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
+ val |= 7;
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+ /* Read back the ctrl register for debug confirmation */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x", 0x600, val);
+ }
+
+ return 0;
+}
+
+/*
+ * cam_ife_csid_tpg_stop() - refcounted stop of the CSID test pattern
+ * generator; mirrors cam_ife_csid_tpg_start().
+ *
+ * Only the last caller (tpg_start_cnt reaches 0) releases the IFE
+ * force clock (dual-ISP case) and zeroes the TPG control register.
+ *
+ * Return: always 0.
+ */
+static int cam_ife_csid_tpg_stop(struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ int rc = 0;
+ struct cam_hw_soc_info *soc_info;
+ const struct cam_ife_csid_reg_offset *csid_reg = NULL;
+
+ if (csid_hw->tpg_start_cnt)
+ csid_hw->tpg_start_cnt--;
+
+ /* Other users still need the TPG running */
+ if (csid_hw->tpg_start_cnt)
+ return 0;
+
+ soc_info = &csid_hw->hw_info->soc_info;
+ csid_reg = csid_hw->csid_info->csid_reg;
+
+ /* disable the TPG */
+ /*
+ * NOTE(review): tpg_start_cnt is already known to be zero here
+ * (the early return above), so this condition is always true.
+ */
+ if (!csid_hw->tpg_start_cnt) {
+ CAM_DBG(CAM_ISP, "CSID:%d stop CSID TPG",
+ csid_hw->hw_intf->hw_idx);
+
+ /* Disable the IFE force clock on for dual isp case */
+ if (csid_hw->tpg_cfg.usage_type)
+ rc = cam_ife_csid_disable_ife_force_clock_on(soc_info,
+ csid_reg->tpg_reg->tpg_cpas_ife_reg_offset);
+
+ /*stop the TPG */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+ }
+
+ return 0;
+}
+
+
+/*
+ * cam_ife_csid_config_tpg() - program the TPG frame geometry and
+ * pattern registers from csid_hw->tpg_cfg.
+ *
+ * Configures VC/DT, blanking, LFSR seed, width/height, decode format
+ * and the color-bar test pattern. Does not start the generator; see
+ * cam_ife_csid_tpg_start().
+ *
+ * Return: always 0.
+ */
+static int cam_ife_csid_config_tpg(struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+ uint32_t val = 0;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ CAM_DBG(CAM_ISP, "CSID:%d TPG config",
+ csid_hw->hw_intf->hw_idx);
+
+ /* configure one DT, infinite frames */
+ val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->tpg_reg->csid_tpg_vc_cfg0_addr);
+
+ /* vertical blanking count = 0x3FF, horizontal blanking count = 0x740 */
+ val = (0x3FF << 12) | 0x740;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->tpg_reg->csid_tpg_vc_cfg1_addr);
+
+ /* Arbitrary non-zero seed for the pseudo-random pattern LFSR */
+ cam_io_w_mb(0x12345678, soc_info->reg_map[0].mem_base +
+ csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_lfsr_seed_addr);
+
+ /* Frame dimensions: width in the upper half-word, height below */
+ val = csid_hw->tpg_cfg.width << 16 |
+ csid_hw->tpg_cfg.height;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->tpg_reg->csid_tpg_dt_n_cfg_0_addr);
+
+ cam_io_w_mb(CAM_IFE_CSID_TPG_DT_VAL, soc_info->reg_map[0].mem_base +
+ csid_reg->tpg_reg->csid_tpg_dt_n_cfg_1_addr);
+
+ /*
+ * in_format is the same as the input resource format.
+ * it is one larger than the register spec format.
+ */
+ val = ((csid_hw->tpg_cfg.in_format - 1) << 16) | 0x8;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);
+
+ /* static frame with split color bar */
+ val = 1 << 5;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
+ /* config pix pattern */
+ cam_io_w_mb(csid_hw->tpg_cfg.test_pattern,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->tpg_reg->csid_tpg_common_gen_cfg_addr);
+
+ return 0;
+}
+
+/*
+ * cam_ife_csid_enable_csi2() - refcounted enable of the CSI2 RX
+ * front end for the given CID resource.
+ *
+ * Only the first caller (csi2_cfg_cnt transitions 0 -> 1) programs
+ * the RX lane configuration, optional TPG config (when the input is
+ * the test pattern generator), and the RX IRQ mask. The resource is
+ * always moved to STREAMING state.
+ *
+ * Return: 0 on success, -EINVAL on refcount overflow, or the TPG
+ * config error.
+ */
+static int cam_ife_csid_enable_csi2(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ int rc = 0;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+ struct cam_ife_csid_cid_data *cid_data;
+ uint32_t val = 0;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+ CAM_DBG(CAM_ISP, "CSID:%d count:%d config csi2 rx",
+ csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+ /* overflow check before increment */
+ if (csid_hw->csi2_cfg_cnt == UINT_MAX) {
+ CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+ csid_hw->hw_intf->hw_idx);
+ return -EINVAL;
+ }
+
+ cid_data = (struct cam_ife_csid_cid_data *)res->res_priv;
+
+ res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+ csid_hw->csi2_cfg_cnt++;
+ /* RX already configured by a previous user */
+ if (csid_hw->csi2_cfg_cnt > 1)
+ return rc;
+
+ /* rx cfg0: lane count/mapping/type and PHY selection */
+ val = (csid_hw->csi2_rx_cfg.lane_num - 1) |
+ (csid_hw->csi2_rx_cfg.lane_cfg << 4) |
+ (csid_hw->csi2_rx_cfg.lane_type << 24);
+ val |= (csid_hw->csi2_rx_cfg.phy_sel & 0x3) << 20;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+
+ /* rx cfg1*/
+ val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
+ /* if VC value is more than 3 than set full width of VC */
+ if (cid_data->vc > 3)
+ val |= (1 << csid_reg->csi2_reg->csi2_vc_mode_shift_val);
+
+ /* enable packet ecc correction */
+ val |= 1;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+ if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+ /* Config the TPG */
+ rc = cam_ife_csid_config_tpg(csid_hw, res);
+ if (rc) {
+ res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ return rc;
+ }
+ }
+
+ /* Enable the CSI2 rx interrupts */
+ val = CSID_CSI2_RX_INFO_RST_DONE |
+ CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW |
+ CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
+ CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
+ CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
+ CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
+ CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION |
+ CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION |
+ CSID_CSI2_RX_ERROR_CRC |
+ CSID_CSI2_RX_ERROR_ECC |
+ CSID_CSI2_RX_ERROR_MMAPPED_VC_DT |
+ CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW |
+ CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME |
+ CSID_CSI2_RX_ERROR_CPHY_PH_CRC;
+
+ /* Enable the interrupt based on csid debug info set */
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ)
+ val |= CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED |
+ CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED |
+ CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED |
+ CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED;
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ)
+ val |= CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED |
+ CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED |
+ CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED |
+ CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED;
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+ val |= CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED;
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+ val |= CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED;
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+ val |= CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED;
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+ return 0;
+}
+
+/*
+ * cam_ife_csid_disable_csi2() - refcounted disable of the CSI2 RX
+ * front end; mirrors cam_ife_csid_enable_csi2().
+ *
+ * Only the last caller (csi2_cfg_cnt reaches 0) masks the RX IRQs
+ * and zeroes the RX CFG registers, then moves the resource back to
+ * RESERVED state.
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range res_id.
+ */
+static int cam_ife_csid_disable_csi2(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+
+ if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res id :%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
+ return -EINVAL;
+ }
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+ CAM_DBG(CAM_ISP, "CSID:%d cnt : %d Disable csi2 rx",
+ csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+ if (csid_hw->csi2_cfg_cnt)
+ csid_hw->csi2_cfg_cnt--;
+
+ /* Other users still need the RX running */
+ if (csid_hw->csi2_cfg_cnt)
+ return 0;
+
+ /* Disable the CSI2 rx interrupts */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+ /* Reset the Rx CFG registers */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+ res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+ return 0;
+}
+
+/*
+ * cam_ife_csid_halt_csi2() - unconditionally halt the CSI2 RX.
+ *
+ * Unlike cam_ife_csid_disable_csi2() this ignores the refcount and
+ * does not touch csi2_cfg_cnt or resource state — presumably an
+ * error-recovery path; it just masks the RX IRQs and clears the RX
+ * CFG registers.
+ */
+static void cam_ife_csid_halt_csi2(
+ struct cam_ife_csid_hw *csid_hw)
+{
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+ CAM_INFO(CAM_ISP, "CSID: %d cnt: %d Halt csi2 rx",
+ csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+ /* Disable the CSI2 rx interrupts */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+ /* Reset the Rx CFG registers */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+}
+
+/*
+ * cam_ife_csid_init_config_pxl_path() - one-time init/config of a
+ * pixel path (IPP or PPP, selected by res->res_id).
+ *
+ * Programs decode format, VC/DT/CID, optional crop window, frame/
+ * irq/pix/line drop patterns, enables the path, and sets up optional
+ * debug features (HBI/VBI measurement, RX packet capture) based on
+ * csid_hw->csid_debug. Moves the resource to INIT_HW state.
+ *
+ * Return: 0 on success, -EINVAL when the path register block is not
+ * present on this HW, or the format-lookup error.
+ */
+static int cam_ife_csid_init_config_pxl_path(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ int rc = 0;
+ struct cam_ife_csid_path_cfg *path_data;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+ const struct cam_ife_csid_pxl_reg_offset *pxl_reg = NULL;
+ bool is_ipp;
+ uint32_t decode_format = 0, plain_format = 0, val = 0;
+
+ path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+ is_ipp = true;
+ pxl_reg = csid_reg->ipp_reg;
+ } else {
+ is_ipp = false;
+ pxl_reg = csid_reg->ppp_reg;
+ }
+
+ if (!pxl_reg) {
+ CAM_ERR(CAM_ISP, "CSID:%d %s:%d is not supported on HW",
+ csid_hw->hw_intf->hw_idx,
+ (is_ipp) ? "IPP" : "PPP", res->res_id);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "Config %s Path", (is_ipp) ? "IPP" : "PPP");
+ rc = cam_ife_csid_get_format_ipp_ppp(path_data->in_format,
+ &decode_format, &plain_format);
+ if (rc)
+ return rc;
+
+ /*
+ * configure Pxl path and enable the time stamp capture.
+ * enable the HW measurement blocks
+ */
+ val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+ (path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+ (path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+ (decode_format << csid_reg->cmn_reg->fmt_shift_val) |
+ (path_data->crop_enable <<
+ csid_reg->cmn_reg->crop_h_en_shift_val) |
+ (path_data->crop_enable <<
+ csid_reg->cmn_reg->crop_v_en_shift_val) |
+ (1 << 1) | 1;
+
+ val |= (1 << pxl_reg->pix_store_en_shift_val);
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_cfg0_addr);
+
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_cfg1_addr);
+
+ /* select the post irq sub sample strobe for time stamp capture */
+ val |= CSID_TIMESTAMP_STB_POST_IRQ;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_cfg1_addr);
+
+ if (path_data->crop_enable) {
+ /* Crop windows: end coordinate in high bits, start below */
+ val = (((path_data->end_pixel & 0xFFFF) <<
+ csid_reg->cmn_reg->crop_shift) |
+ (path_data->start_pixel & 0xFFFF));
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_hcrop_addr);
+ CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x",
+ csid_hw->hw_intf->hw_idx, val);
+
+ val = (((path_data->end_line & 0xFFFF) <<
+ csid_reg->cmn_reg->crop_shift) |
+ (path_data->start_line & 0xFFFF));
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_vcrop_addr);
+ CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x",
+ csid_hw->hw_intf->hw_idx, val);
+
+ /* Enable generating early eof strobe based on crop config */
+ if (!(csid_hw->csid_debug & CSID_DEBUG_DISABLE_EARLY_EOF)) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_cfg0_addr);
+ val |= (1 << pxl_reg->early_eof_en_shift_val);
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_cfg0_addr);
+ }
+ }
+
+ /* set frame drop pattern to 0 and period to 1 */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_frm_drop_period_addr);
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_frm_drop_pattern_addr);
+ /* set irq sub sample pattern to 0 and period to 1 */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_irq_subsample_period_addr);
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_irq_subsample_pattern_addr);
+ /* set pxl drop pattern to 0 and period to 1 */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_pix_drop_pattern_addr);
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_pix_drop_period_addr);
+ /* set line drop pattern to 0 and period to 1 */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_line_drop_pattern_addr);
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_line_drop_period_addr);
+
+
+ /* Enable the Pxl path */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_cfg0_addr);
+ val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO)
+ val |= csid_reg->cmn_reg->format_measure_en_val;
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_cfg0_addr);
+
+ /* Enable the HBI/VBI counter */
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_format_measure_cfg0_addr);
+ val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+ cam_io_w_mb(val,
+ soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_format_measure_cfg0_addr);
+ }
+
+ /* configure the rx packet capture based on csid debug set */
+ val = 0;
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+ val = ((1 <<
+ csid_reg->csi2_reg->csi2_capture_short_pkt_en_shift) |
+ (path_data->vc <<
+ csid_reg->csi2_reg->csi2_capture_short_pkt_vc_shift));
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+ val |= ((1 <<
+ csid_reg->csi2_reg->csi2_capture_long_pkt_en_shift) |
+ (path_data->dt <<
+ csid_reg->csi2_reg->csi2_capture_long_pkt_dt_shift) |
+ (path_data->vc <<
+ csid_reg->csi2_reg->csi2_capture_long_pkt_vc_shift));
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+ val |= ((1 <<
+ csid_reg->csi2_reg->csi2_capture_cphy_pkt_en_shift) |
+ (path_data->dt <<
+ csid_reg->csi2_reg->csi2_capture_cphy_pkt_dt_shift) |
+ (path_data->vc <<
+ csid_reg->csi2_reg->csi2_capture_cphy_pkt_vc_shift));
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_capture_ctrl_addr);
+ CAM_DBG(CAM_ISP, "rx capture control value 0x%x", val);
+
+ res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+ return rc;
+}
+
+/*
+ * cam_ife_csid_deinit_pxl_path() - de-init a pixel path (IPP/PPP).
+ *
+ * Turns off the format-measure (HBI/VBI) debug counters if they were
+ * enabled and returns the resource to RESERVED state.
+ *
+ * Return: 0 on success, -EINVAL when the resource was in the wrong
+ * state or the path register block is absent on this HW.
+ */
+static int cam_ife_csid_deinit_pxl_path(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ int rc = 0;
+ uint32_t val;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+ const struct cam_ife_csid_pxl_reg_offset *pxl_reg = NULL;
+ bool is_ipp;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+ is_ipp = true;
+ pxl_reg = csid_reg->ipp_reg;
+ } else {
+ is_ipp = false;
+ pxl_reg = csid_reg->ppp_reg;
+ }
+
+ /*
+ * NOTE(review): a wrong state only records -EINVAL and falls
+ * through — the counter disable below still runs, seemingly so
+ * cleanup happens regardless; confirm this is intentional.
+ */
+ if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d %s Res type %d res_id:%d in wrong state %d",
+ csid_hw->hw_intf->hw_idx,
+ (is_ipp) ? "IPP" : "PPP",
+ res->res_type, res->res_id, res->res_state);
+ rc = -EINVAL;
+ }
+
+ if (!pxl_reg) {
+ CAM_ERR(CAM_ISP, "CSID:%d %s %d is not supported on HW",
+ csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
+ res->res_id);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_cfg0_addr);
+ if (val & csid_reg->cmn_reg->format_measure_en_val) {
+ val &= ~csid_reg->cmn_reg->format_measure_en_val;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_cfg0_addr);
+
+ /* Disable the HBI/VBI counter */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_format_measure_cfg0_addr);
+ val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_format_measure_cfg0_addr);
+ }
+
+end:
+ res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ return rc;
+}
+
+/*
+ * cam_ife_csid_enable_pxl_path() - start streaming on a pixel path
+ * (IPP/PPP) that was previously configured via
+ * cam_ife_csid_init_config_pxl_path().
+ *
+ * Programs the halt/resume mode (master/slave/internal based on the
+ * dual-ISP sync mode), enables the path IRQs, and moves the resource
+ * to STREAMING state.
+ *
+ * Return: 0 on success, -EINVAL for wrong state or unsupported path.
+ */
+static int cam_ife_csid_enable_pxl_path(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+ struct cam_ife_csid_path_cfg *path_data;
+ const struct cam_ife_csid_pxl_reg_offset *pxl_reg = NULL;
+ bool is_ipp;
+ uint32_t val = 0;
+
+ path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+ is_ipp = true;
+ pxl_reg = csid_reg->ipp_reg;
+ } else {
+ is_ipp = false;
+ pxl_reg = csid_reg->ppp_reg;
+ }
+
+ if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d %s path res type:%d res_id:%d Invalid state%d",
+ csid_hw->hw_intf->hw_idx,
+ (is_ipp) ? "IPP" : "PPP",
+ res->res_type, res->res_id, res->res_state);
+ return -EINVAL;
+ }
+
+ if (!pxl_reg) {
+ CAM_ERR(CAM_ISP, "CSID:%d %s %d not supported on HW",
+ csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
+ res->res_id);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "Enable %s path", (is_ipp) ? "IPP" : "PPP");
+
+ /* Set master or slave path */
+ if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
+ /*Set halt mode as master */
+ val = CSID_HALT_MODE_MASTER << 2;
+ else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+ /*Set halt mode as slave and set master idx */
+ val = path_data->master_idx << 4 | CSID_HALT_MODE_SLAVE << 2;
+ else
+ /* Default is internal halt mode */
+ val = 0;
+
+ /*
+ * Resume at frame boundary if Master or No Sync.
+ * Slave will get resume command from Master.
+ */
+ if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
+ path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
+ val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_ctrl_addr);
+
+ CAM_DBG(CAM_ISP, "CSID:%d %s Ctrl val: 0x%x",
+ csid_hw->hw_intf->hw_idx,
+ (is_ipp) ? "IPP" : "PPP", val);
+
+ /* Enable the required pxl path interrupts */
+ val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)
+ val |= CSID_PATH_INFO_INPUT_SOF;
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)
+ val |= CSID_PATH_INFO_INPUT_EOF;
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_irq_mask_addr);
+
+ CAM_DBG(CAM_ISP, "Enable %s IRQ mask 0x%x",
+ (is_ipp) ? "IPP" : "PPP", val);
+
+ res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+ return 0;
+}
+
+/*
+ * cam_ife_csid_disable_pxl_path() - stop streaming on a pixel path.
+ *
+ * Validates the resource/state/halt command, then masks the path
+ * IRQs. The halt command itself (frame boundary vs immediate) is
+ * only validated here — presumably the actual halt register write is
+ * issued by a caller/stop sequence elsewhere; confirm against the
+ * stop path. Resource state is not changed by this function.
+ *
+ * Return: 0 on success or already-stopped, -EINVAL on bad input.
+ */
+static int cam_ife_csid_disable_pxl_path(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res,
+ enum cam_ife_csid_halt_cmd stop_cmd)
+{
+ int rc = 0;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+ struct cam_ife_csid_path_cfg *path_data;
+ const struct cam_ife_csid_pxl_reg_offset *pxl_reg;
+ bool is_ipp;
+
+ path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
+ return -EINVAL;
+ }
+
+ /* Already stopped: not an error */
+ if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+ res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+ csid_hw->hw_intf->hw_idx, res->res_id, res->res_state);
+ return rc;
+ }
+
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+ is_ipp = true;
+ pxl_reg = csid_reg->ipp_reg;
+ } else {
+ is_ipp = false;
+ pxl_reg = csid_reg->ppp_reg;
+ }
+
+ if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+ CAM_DBG(CAM_ISP, "CSID:%d %s path Res:%d Invalid state%d",
+ csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
+ res->res_id, res->res_state);
+ return -EINVAL;
+ }
+
+ if (!pxl_reg) {
+ CAM_ERR(CAM_ISP, "CSID:%d %s:%d is not supported on HW",
+ csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
+ res->res_id);
+ return -EINVAL;
+ }
+
+ if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
+ stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+ CAM_ERR(CAM_ISP, "CSID:%d %s path un supported stop command:%d",
+ csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
+ stop_cmd);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "CSID:%d res_id:%d %s path",
+ csid_hw->hw_intf->hw_idx, res->res_id,
+ (is_ipp) ? "IPP" : "PPP");
+
+ /* Mask all path interrupts */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ pxl_reg->csid_pxl_irq_mask_addr);
+
+ return rc;
+}
+
+/*
+ * cam_ife_csid_init_config_rdi_path() - one-time init/config of an
+ * RDI path (res->res_id selects the RDI register block).
+ *
+ * Programs path format, VC/DT/CID, optional crop window, frame/irq/
+ * pix/line drop patterns, halt mode, enables the path, and sets up
+ * optional debug features (HBI/VBI measurement, RX packet capture)
+ * based on csid_hw->csid_debug. Moves the resource to INIT_HW state.
+ *
+ * Fix vs. previous revision: 'val' is reset to 0 before the rx
+ * packet capture section, matching the IPP/PPP variant. Previously
+ * the stale cfg0/format-measure value leaked into the unconditional
+ * write to csid_csi2_rx_capture_ctrl_addr when no capture debug
+ * flags were set.
+ *
+ * Return: 0 on success, -EINVAL for an unsupported RDI, or the
+ * format-lookup error.
+ */
+static int cam_ife_csid_init_config_rdi_path(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ int rc = 0;
+ struct cam_ife_csid_path_cfg *path_data;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+ uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
+ uint32_t format_measure_addr;
+
+ path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ id = res->res_id;
+ if (!csid_reg->rdi_reg[id]) {
+ CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW",
+ csid_hw->hw_intf->hw_idx, id);
+ return -EINVAL;
+ }
+
+ rc = cam_ife_csid_get_format_rdi(path_data->in_format,
+ path_data->out_format, &path_format, &plain_fmt);
+ if (rc)
+ return rc;
+
+ /* if path decode format is payload only then RDI crop is not applied */
+ if (path_format == 0xF)
+ path_data->crop_enable = 0;
+
+ /*
+ * RDI path config and enable the time stamp capture
+ * Enable the measurement blocks
+ */
+ val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+ (path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+ (path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+ (path_format << csid_reg->cmn_reg->fmt_shift_val) |
+ (plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) |
+ (path_data->crop_enable <<
+ csid_reg->cmn_reg->crop_h_en_shift_val) |
+ (path_data->crop_enable <<
+ csid_reg->cmn_reg->crop_v_en_shift_val) |
+ (1 << 2) | 3;
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+ /* select the post irq sub sample strobe for time stamp capture */
+ cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr);
+
+ if (path_data->crop_enable) {
+ /* Crop windows: end coordinate in high bits, start below */
+ val = (((path_data->end_pixel & 0xFFFF) <<
+ csid_reg->cmn_reg->crop_shift) |
+ (path_data->start_pixel & 0xFFFF));
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_rpp_hcrop_addr);
+ CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x",
+ csid_hw->hw_intf->hw_idx, val);
+
+ val = (((path_data->end_line & 0xFFFF) <<
+ csid_reg->cmn_reg->crop_shift) |
+ (path_data->start_line & 0xFFFF));
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_rpp_vcrop_addr);
+ CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x",
+ csid_hw->hw_intf->hw_idx, val);
+ }
+ /* set frame drop pattern to 0 and period to 1 */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_frm_drop_period_addr);
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_frm_drop_pattern_addr);
+ /* set IRQ sub sample pattern to 0 and period to 1 */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_period_addr);
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_pattern_addr);
+
+ /* set pixel drop pattern to 0 and period to 1 */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_pattern_addr);
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_period_addr);
+ /* set line drop pattern to 0 and period to 1 */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_pattern_addr);
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_period_addr);
+
+ /* Configure the halt mode */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+ /* Enable the RPP path */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+ val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO)
+ val |= csid_reg->cmn_reg->format_measure_en_val;
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+ format_measure_addr =
+ csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr;
+
+ /* Enable the HBI/VBI counter */
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ format_measure_addr);
+ val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+ cam_io_w_mb(val,
+ soc_info->reg_map[0].mem_base + format_measure_addr);
+ }
+
+ /* configure the rx packet capture based on csid debug set */
+ /*
+ * Reset val so stale cfg0/format-measure bits are never written
+ * to the capture control register (matches the pxl path).
+ */
+ val = 0;
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+ val = ((1 <<
+ csid_reg->csi2_reg->csi2_capture_short_pkt_en_shift) |
+ (path_data->vc <<
+ csid_reg->csi2_reg->csi2_capture_short_pkt_vc_shift));
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+ val |= ((1 <<
+ csid_reg->csi2_reg->csi2_capture_long_pkt_en_shift) |
+ (path_data->dt <<
+ csid_reg->csi2_reg->csi2_capture_long_pkt_dt_shift) |
+ (path_data->vc <<
+ csid_reg->csi2_reg->csi2_capture_long_pkt_vc_shift));
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+ val |= ((1 <<
+ csid_reg->csi2_reg->csi2_capture_cphy_pkt_en_shift) |
+ (path_data->dt <<
+ csid_reg->csi2_reg->csi2_capture_cphy_pkt_dt_shift) |
+ (path_data->vc <<
+ csid_reg->csi2_reg->csi2_capture_cphy_pkt_vc_shift));
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_capture_ctrl_addr);
+
+ res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+ return rc;
+}
+
+/*
+ * cam_ife_csid_deinit_rdi_path() - de-init an RDI path.
+ *
+ * Turns off the format-measure (HBI/VBI) debug counters if the debug
+ * flag is set and returns the resource to RESERVED state.
+ *
+ * Return: 0 on success, -EINVAL for a bad id/state/unsupported RDI.
+ */
+static int cam_ife_csid_deinit_rdi_path(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ int rc = 0;
+ uint32_t id, val, format_measure_addr;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+ id = res->res_id;
+
+ /* short-circuit keeps rdi_reg[id] safe when id is out of range */
+ if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
+ res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+ !csid_reg->rdi_reg[id]) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res id%d state:%d",
+ csid_hw->hw_intf->hw_idx, res->res_id,
+ res->res_state);
+ return -EINVAL;
+ }
+
+ format_measure_addr =
+ csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr;
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+ val &= ~csid_reg->cmn_reg->format_measure_en_val;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+ /* Disable the HBI/VBI counter */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ format_measure_addr);
+ val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ format_measure_addr);
+ }
+
+ res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ return rc;
+}
+
+/*
+ * cam_ife_csid_enable_rdi_path() - start streaming on an RDI path
+ * previously configured via cam_ife_csid_init_config_rdi_path().
+ *
+ * Issues the resume-at-frame-boundary command, enables the RDI path
+ * IRQs, and moves the resource to STREAMING state.
+ *
+ * Return: 0 on success, -EINVAL for bad state/id/unsupported RDI.
+ */
+static int cam_ife_csid_enable_rdi_path(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+ uint32_t id, val;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+ id = res->res_id;
+
+ if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+ res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
+ !csid_reg->rdi_reg[id]) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d invalid res type:%d res_id:%d state%d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type, res->res_id, res->res_state);
+ return -EINVAL;
+ }
+
+ /*resume at frame boundary */
+ cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+ /* Enable the required RDI interrupts */
+ val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)
+ val |= CSID_PATH_INFO_INPUT_SOF;
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)
+ val |= CSID_PATH_INFO_INPUT_EOF;
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+
+ res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+ return 0;
+}
+
+
+/*
+ * cam_ife_csid_disable_rdi_path() - stop streaming on an RDI path.
+ *
+ * Validates the resource/state/halt command, then masks the RDI path
+ * IRQs. Like the pixel-path variant, the halt command is validated
+ * here but the halt register write itself is presumably issued by
+ * the caller's stop sequence — confirm against the stop path.
+ * Resource state is not changed by this function.
+ *
+ * Return: 0 on success or already-stopped, -EINVAL on bad input.
+ */
+static int cam_ife_csid_disable_rdi_path(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res,
+ enum cam_ife_csid_halt_cmd stop_cmd)
+{
+ int rc = 0;
+ uint32_t id;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+ id = res->res_id;
+
+ if ((res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3) ||
+ (!csid_reg->rdi_reg[res->res_id])) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
+ return -EINVAL;
+ }
+
+ /* Already stopped: not an error */
+ if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+ res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d Res:%d already in stopped state:%d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_id, res->res_state);
+ return rc;
+ }
+
+ if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d Res:%d Invalid res_state%d",
+ csid_hw->hw_intf->hw_idx, res->res_id,
+ res->res_state);
+ return -EINVAL;
+ }
+
+ if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
+ stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+ CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d",
+ csid_hw->hw_intf->hw_idx, stop_cmd);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
+
+ /* Mask all RDI path interrupts */
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+
+ return rc;
+}
+
+/*
+ * cam_ife_csid_get_hbi_vbi() - read and log the measured horizontal
+ * and vertical blanking intervals for a path resource.
+ *
+ * Reads the format-measure1/2 registers of the IPP, PPP or RDI block
+ * selected by res->res_id and emits them via rate-limited info logs.
+ * Requires the HW to be powered up.
+ *
+ * Return: 0 on success, -EINVAL for bad resource or powered-down HW.
+ */
+static int cam_ife_csid_get_hbi_vbi(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ uint32_t hbi, vbi;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ const struct cam_ife_csid_rdi_reg_offset *rdi_reg;
+ struct cam_hw_soc_info *soc_info;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+ res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ /* Registers are only accessible while powered up */
+ if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->hw_info->hw_state);
+ return -EINVAL;
+ }
+
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+ hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_format_measure1_addr);
+ vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_format_measure2_addr);
+ } else if (res->res_id == CAM_IFE_PIX_PATH_RES_PPP) {
+ hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_format_measure1_addr);
+ vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_format_measure2_addr);
+ } else {
+ /* Remaining ids are RDI paths */
+ rdi_reg = csid_reg->rdi_reg[res->res_id];
+ hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ rdi_reg->csid_rdi_format_measure1_addr);
+ vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ rdi_reg->csid_rdi_format_measure2_addr);
+ }
+
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "Resource %u HBI: 0x%x", res->res_id,
+ hbi);
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "Resource %u VBI: 0x%x", res->res_id,
+ vbi);
+
+ return 0;
+}
+
+
+/*
+ * cam_ife_csid_get_time_stamp() - read the latest SOF timestamp of a path.
+ *
+ * Assembles a 64-bit QTimer SOF timestamp from the per-path curr1
+ * (upper 32 bits) and curr0 (lower 32 bits) registers, scales it to
+ * nanoseconds via CAM_IFE_CSID_QTIMER_MUL/DIV_FACTOR, and also samples
+ * the current boottime clock into boot_timestamp (ns).
+ *
+ * NOTE(review): curr1 is read before curr0 - assumes the HW latches the
+ * register pair on the curr1 read so the two halves are coherent;
+ * confirm with the CSID HPG.
+ * NOTE(review): get_monotonic_boottime64() is deprecated in newer
+ * kernels (ktime_get_boottime_ts64()); fine for this kernel version.
+ *
+ * Returns 0 on success, -EINVAL on bad resource or powered-down HW.
+ */
+static int cam_ife_csid_get_time_stamp(
+ struct cam_ife_csid_hw *csid_hw, void *cmd_args)
+{
+ struct cam_csid_get_time_stamp_args *time_stamp;
+ struct cam_isp_resource_node *res;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+ const struct cam_ife_csid_rdi_reg_offset *rdi_reg;
+ struct timespec64 ts;
+ uint32_t time_32, id;
+
+ time_stamp = (struct cam_csid_get_time_stamp_args *)cmd_args;
+ res = time_stamp->node_res;
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+ res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->hw_info->hw_state);
+ return -EINVAL;
+ }
+
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+ time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_timestamp_curr1_sof_addr);
+ time_stamp->time_stamp_val = (uint64_t) time_32;
+ time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+ time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_timestamp_curr0_sof_addr);
+ } else if (res->res_id == CAM_IFE_PIX_PATH_RES_PPP) {
+ time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_timestamp_curr1_sof_addr);
+ time_stamp->time_stamp_val = (uint64_t) time_32;
+ time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+ time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_timestamp_curr0_sof_addr);
+ } else {
+ id = res->res_id;
+ rdi_reg = csid_reg->rdi_reg[id];
+ time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ rdi_reg->csid_rdi_timestamp_curr1_sof_addr);
+ time_stamp->time_stamp_val = (uint64_t) time_32;
+ time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+
+ time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ rdi_reg->csid_rdi_timestamp_curr0_sof_addr);
+ }
+
+ /* Merge the low word, then convert QTimer ticks to nanoseconds */
+ time_stamp->time_stamp_val |= (uint64_t) time_32;
+ time_stamp->time_stamp_val = mul_u64_u32_div(
+ time_stamp->time_stamp_val,
+ CAM_IFE_CSID_QTIMER_MUL_FACTOR,
+ CAM_IFE_CSID_QTIMER_DIV_FACTOR);
+
+ get_monotonic_boottime64(&ts);
+ time_stamp->boot_timestamp = (uint64_t)((ts.tv_sec * 1000000000) +
+ ts.tv_nsec);
+
+ return 0;
+}
+
+/*
+ * cam_ife_csid_set_csid_debug() - update the CSID debug-feature bitmask.
+ *
+ * Stores the 32-bit value supplied by the command into csid_hw->csid_debug;
+ * the bits select which CSID_DEBUG_ENABLE_* logging/IRQ features are
+ * active (consumed by the IRQ handler and process_cmd). Always returns 0.
+ */
+static int cam_ife_csid_set_csid_debug(struct cam_ife_csid_hw *csid_hw,
+ void *cmd_args)
+{
+ uint32_t *csid_debug;
+
+ csid_debug = (uint32_t *) cmd_args;
+ csid_hw->csid_debug = *csid_debug;
+ CAM_DBG(CAM_ISP, "CSID:%d set csid debug value:%d",
+ csid_hw->hw_intf->hw_idx, csid_hw->csid_debug);
+
+ return 0;
+}
+
+/*
+ * cam_ife_csid_get_hw_caps() - report static CSID capabilities.
+ *
+ * Copies the path counts (RDI/pix/PPP) and the HW version triple from
+ * the per-SoC register-offset table into the caller's caps structure.
+ * Returns 0 on success, -EINVAL on NULL arguments.
+ */
+static int cam_ife_csid_get_hw_caps(void *hw_priv,
+ void *get_hw_cap_args, uint32_t arg_size)
+{
+ int rc = 0;
+ struct cam_ife_csid_hw_caps *hw_caps;
+ struct cam_ife_csid_hw *csid_hw;
+ struct cam_hw_info *csid_hw_info;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+
+ if (!hw_priv || !get_hw_cap_args) {
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
+ return -EINVAL;
+ }
+
+ csid_hw_info = (struct cam_hw_info *)hw_priv;
+ csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
+ csid_reg = csid_hw->csid_info->csid_reg;
+ hw_caps = (struct cam_ife_csid_hw_caps *) get_hw_cap_args;
+
+ hw_caps->num_rdis = csid_reg->cmn_reg->num_rdis;
+ hw_caps->num_pix = csid_reg->cmn_reg->num_pix;
+ hw_caps->num_ppp = csid_reg->cmn_reg->num_ppp;
+ hw_caps->major_version = csid_reg->cmn_reg->major_version;
+ hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
+ hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
+
+ CAM_DBG(CAM_ISP,
+ "CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d",
+ csid_hw->hw_intf->hw_idx, hw_caps->num_rdis,
+ hw_caps->num_pix, hw_caps->major_version,
+ hw_caps->minor_version, hw_caps->version_incr);
+
+ return rc;
+}
+
+/*
+ * cam_ife_csid_reset() - HW-interface reset entry point.
+ *
+ * Dispatches to a global (whole-CSID) or per-path reset based on
+ * reset->reset_type. Returns the underlying reset's result, or
+ * -EINVAL on bad arguments or an unknown reset type.
+ */
+static int cam_ife_csid_reset(void *hw_priv,
+ void *reset_args, uint32_t arg_size)
+{
+ struct cam_ife_csid_hw *csid_hw;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_csid_reset_cfg_args *reset;
+ int rc = 0;
+
+ if (!hw_priv || !reset_args || (arg_size !=
+ sizeof(struct cam_csid_reset_cfg_args))) {
+ CAM_ERR(CAM_ISP, "CSID:Invalid args");
+ return -EINVAL;
+ }
+
+ csid_hw_info = (struct cam_hw_info *)hw_priv;
+ csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
+ reset = (struct cam_csid_reset_cfg_args *)reset_args;
+
+ switch (reset->reset_type) {
+ case CAM_IFE_CSID_RESET_GLOBAL:
+ rc = cam_ife_csid_global_reset(csid_hw);
+ break;
+ case CAM_IFE_CSID_RESET_PATH:
+ rc = cam_ife_csid_path_reset(csid_hw, reset);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID:Invalid reset type :%d",
+ reset->reset_type);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * cam_ife_csid_reserve() - reserve a CID or pixel-path resource.
+ *
+ * Dispatches to the CID or path reservation helper under hw_mutex so
+ * concurrent acquire calls cannot race on the resource bookkeeping.
+ * Returns the helper's result, or -EINVAL on bad args/resource type.
+ */
+static int cam_ife_csid_reserve(void *hw_priv,
+ void *reserve_args, uint32_t arg_size)
+{
+ int rc = 0;
+ struct cam_ife_csid_hw *csid_hw;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_csid_hw_reserve_resource_args *reserv;
+
+ if (!hw_priv || !reserve_args || (arg_size !=
+ sizeof(struct cam_csid_hw_reserve_resource_args))) {
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
+ return -EINVAL;
+ }
+
+ csid_hw_info = (struct cam_hw_info *)hw_priv;
+ csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
+ reserv = (struct cam_csid_hw_reserve_resource_args *)reserve_args;
+
+ CAM_DBG(CAM_ISP, "res_type %d, CSID: %u",
+ reserv->res_type, csid_hw->hw_intf->hw_idx);
+
+ mutex_lock(&csid_hw->hw_info->hw_mutex);
+ switch (reserv->res_type) {
+ case CAM_ISP_RESOURCE_CID:
+ rc = cam_ife_csid_cid_reserve(csid_hw, reserv);
+ break;
+ case CAM_ISP_RESOURCE_PIX_PATH:
+ rc = cam_ife_csid_path_reserve(csid_hw, reserv);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type :%d",
+ csid_hw->hw_intf->hw_idx, reserv->res_type);
+ rc = -EINVAL;
+ break;
+ }
+ mutex_unlock(&csid_hw->hw_info->hw_mutex);
+ return rc;
+}
+
+/*
+ * cam_ife_csid_release() - release a previously reserved resource.
+ *
+ * For a CID, decrements its reference count (a CID may be shared) and
+ * the csi2_reserve_cnt; when the last CSI2 user goes away, the cached
+ * RX configuration is cleared. A pixel-path resource is simply moved
+ * back to AVAILABLE. Resources that are not in a reserved/used state
+ * are warned about and left untouched (returns 0).
+ * Returns 0 on success, -EINVAL on an invalid resource.
+ */
+static int cam_ife_csid_release(void *hw_priv,
+ void *release_args, uint32_t arg_size)
+{
+ int rc = 0;
+ struct cam_ife_csid_hw *csid_hw;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_isp_resource_node *res;
+ struct cam_ife_csid_cid_data *cid_data;
+
+ if (!hw_priv || !release_args ||
+ (arg_size != sizeof(struct cam_isp_resource_node))) {
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
+ return -EINVAL;
+ }
+
+ csid_hw_info = (struct cam_hw_info *)hw_priv;
+ csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
+ res = (struct cam_isp_resource_node *)release_args;
+
+ mutex_lock(&csid_hw->hw_info->hw_mutex);
+ if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+ res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+ (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+ res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* Only RESERVED/INIT_HW states are releasable; warn otherwise */
+ if ((res->res_state <= CAM_ISP_RESOURCE_STATE_AVAILABLE) ||
+ (res->res_state >= CAM_ISP_RESOURCE_STATE_STREAMING)) {
+ CAM_WARN(CAM_ISP,
+ "CSID:%d res type:%d Res %d in state %d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type, res->res_id,
+ res->res_state);
+ goto end;
+ }
+
+ CAM_DBG(CAM_ISP, "CSID:%d res type :%d Resource id:%d",
+ csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+ switch (res->res_type) {
+ case CAM_ISP_RESOURCE_CID:
+ /*
+ * CIDs are reference counted: the resource becomes
+ * AVAILABLE again only when the last user releases it.
+ */
+ cid_data = (struct cam_ife_csid_cid_data *) res->res_priv;
+ if (cid_data->cnt)
+ cid_data->cnt--;
+
+ if (!cid_data->cnt)
+ res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+ if (csid_hw->csi2_reserve_cnt)
+ csid_hw->csi2_reserve_cnt--;
+
+ /* Last CSI2 user gone: drop the cached RX configuration */
+ if (!csid_hw->csi2_reserve_cnt)
+ memset(&csid_hw->csi2_rx_cfg, 0,
+ sizeof(struct cam_ife_csid_csi2_rx_cfg));
+
+ CAM_DBG(CAM_ISP, "CSID:%d res id :%d cnt:%d reserv cnt:%d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);
+
+ break;
+ case CAM_ISP_RESOURCE_PIX_PATH:
+ res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ rc = -EINVAL;
+ break;
+ }
+
+end:
+ mutex_unlock(&csid_hw->hw_info->hw_mutex);
+ return rc;
+}
+
+/*
+ * cam_ife_csid_reset_retain_sw_reg() - SW reset retaining programmed regs.
+ *
+ * Issues the csid_rst_stb strobe (a reset variant that keeps the SW
+ * programmed register values) and busy-polls the top IRQ status for
+ * the reset-done bit (bit 0). The top IRQ is cleared both before the
+ * strobe - so the poll only sees this reset's completion - and after.
+ * Returns 0 on success, -ETIMEDOUT if the done bit never asserts.
+ */
+static int cam_ife_csid_reset_retain_sw_reg(
+ struct cam_ife_csid_hw *csid_hw)
+{
+ int rc = 0;
+ uint32_t status;
+ const struct cam_ife_csid_reg_offset *csid_reg =
+ csid_hw->csid_info->csid_reg;
+ struct cam_hw_soc_info *soc_info;
+
+ soc_info = &csid_hw->hw_info->soc_info;
+ /* clear the top interrupt first */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_top_irq_clear_addr);
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+ cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_rst_strobes_addr);
+ rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_top_irq_status_addr,
+ status, (status & 0x1) == 0x1,
+ CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
+ rc = -ETIMEDOUT;
+ } else {
+ CAM_DBG(CAM_ISP, "CSID:%d hw reset completed %d",
+ csid_hw->hw_intf->hw_idx, rc);
+ rc = 0;
+ }
+ /* Clear the reset-done bit so it is not mistaken for a later IRQ */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_top_irq_clear_addr);
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+ return rc;
+}
+
+/*
+ * cam_ife_csid_init_hw() - power up the CSID and init one resource.
+ *
+ * Validates the resource, enables the CSID core, then configures the
+ * CID (CSI2 RX) or the pixel/RDI path, and finally performs a SW reset
+ * that retains the programmed registers. On any failure after power-up
+ * the CSID is disabled again. Serialized with hw_mutex.
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int cam_ife_csid_init_hw(void *hw_priv,
+ void *init_args, uint32_t arg_size)
+{
+ int rc = 0;
+ struct cam_ife_csid_hw *csid_hw;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_isp_resource_node *res;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+
+ if (!hw_priv || !init_args ||
+ (arg_size != sizeof(struct cam_isp_resource_node))) {
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
+ return -EINVAL;
+ }
+
+ csid_hw_info = (struct cam_hw_info *)hw_priv;
+ csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
+ res = (struct cam_isp_resource_node *)init_args;
+ csid_reg = csid_hw->csid_info->csid_reg;
+
+ mutex_lock(&csid_hw->hw_info->hw_mutex);
+ if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+ res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+ (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+ res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res tpe:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
+ (res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
+ CAM_ERR(CAM_ISP,
+ "CSID:%d res type:%d res_id:%dInvalid state %d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type, res->res_id, res->res_state);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ CAM_DBG(CAM_ISP, "CSID:%d res type :%d res_id:%d",
+ csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+ /* Initialize the csid hardware */
+ rc = cam_ife_csid_enable_hw(csid_hw);
+ if (rc)
+ goto end;
+
+ switch (res->res_type) {
+ case CAM_ISP_RESOURCE_CID:
+ rc = cam_ife_csid_enable_csi2(csid_hw, res);
+ break;
+ case CAM_ISP_RESOURCE_PIX_PATH:
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP ||
+ res->res_id == CAM_IFE_PIX_PATH_RES_PPP)
+ rc = cam_ife_csid_init_config_pxl_path(csid_hw, res);
+ else
+ rc = cam_ife_csid_init_config_rdi_path(csid_hw, res);
+
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type state %d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type);
+ /* Fix: propagate the failure instead of returning success */
+ rc = -EINVAL;
+ break;
+ }
+
+ /*
+ * Fix: only issue the SW reset when the CID/path configuration
+ * succeeded. Previously rc was unconditionally overwritten here,
+ * so a config failure followed by a successful SW reset was
+ * reported to the caller as success.
+ */
+ if (!rc) {
+ rc = cam_ife_csid_reset_retain_sw_reg(csid_hw);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP, "CSID: Failed in SW reset");
+ }
+
+ if (rc)
+ cam_ife_csid_disable_hw(csid_hw);
+end:
+ mutex_unlock(&csid_hw->hw_info->hw_mutex);
+ return rc;
+}
+
+/*
+ * cam_ife_csid_deinit_hw() - de-init one resource and power down CSID.
+ *
+ * Tears down the CID (CSI2 RX) or pixel/RDI path configuration, then
+ * disables the CSID core. A resource already back in RESERVED state is
+ * treated as already de-initialized (returns 0). For an invalid
+ * resource type the CSID is deliberately left untouched.
+ * Serialized with hw_mutex. Returns 0 on success, negative errno.
+ */
+static int cam_ife_csid_deinit_hw(void *hw_priv,
+ void *deinit_args, uint32_t arg_size)
+{
+ int rc = 0;
+ struct cam_ife_csid_hw *csid_hw;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_isp_resource_node *res;
+
+ if (!hw_priv || !deinit_args ||
+ (arg_size != sizeof(struct cam_isp_resource_node))) {
+ CAM_ERR(CAM_ISP, "CSID:Invalid arguments");
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "Enter");
+ res = (struct cam_isp_resource_node *)deinit_args;
+ csid_hw_info = (struct cam_hw_info *)hw_priv;
+ csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
+
+ mutex_lock(&csid_hw->hw_info->hw_mutex);
+ if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in De-init state",
+ csid_hw->hw_intf->hw_idx,
+ res->res_id);
+ goto end;
+ }
+
+ switch (res->res_type) {
+ case CAM_ISP_RESOURCE_CID:
+ CAM_DBG(CAM_ISP, "De-Init ife_csid");
+ rc = cam_ife_csid_disable_csi2(csid_hw, res);
+ break;
+ case CAM_ISP_RESOURCE_PIX_PATH:
+ CAM_DBG(CAM_ISP, "De-Init Pix Path: %d\n", res->res_id);
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP ||
+ res->res_id == CAM_IFE_PIX_PATH_RES_PPP)
+ rc = cam_ife_csid_deinit_pxl_path(csid_hw, res);
+ else
+ rc = cam_ife_csid_deinit_rdi_path(csid_hw, res);
+
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid Res type %d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type);
+ /* Fix: report the failure; previously this returned 0 */
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* Disable CSID HW */
+ CAM_DBG(CAM_ISP, "Disabling CSID Hw\n");
+ cam_ife_csid_disable_hw(csid_hw);
+ CAM_DBG(CAM_ISP, "%s: Exit\n", __func__);
+
+end:
+ mutex_unlock(&csid_hw->hw_info->hw_mutex);
+ return rc;
+}
+
+/*
+ * cam_ife_csid_start() - start streaming on one CSID resource.
+ *
+ * For a CID resource the TPG is started when the CSID input is the
+ * test pattern generator; for a pixel-path resource the IPP/PPP or RDI
+ * enable helper is invoked. Also resets the SOF-IRQ debug counters
+ * used by the SOF-freeze detection in the IRQ handler.
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int cam_ife_csid_start(void *hw_priv, void *start_args,
+ uint32_t arg_size)
+{
+ int rc = 0;
+ struct cam_ife_csid_hw *csid_hw;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_isp_resource_node *res;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+
+ if (!hw_priv || !start_args ||
+ (arg_size != sizeof(struct cam_isp_resource_node))) {
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
+ return -EINVAL;
+ }
+
+ csid_hw_info = (struct cam_hw_info *)hw_priv;
+ csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
+ res = (struct cam_isp_resource_node *)start_args;
+ csid_reg = csid_hw->csid_info->csid_reg;
+
+ if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+ res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+ (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+ res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res tpe:%d res id:%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* Reset sof irq debug fields */
+ csid_hw->sof_irq_triggered = false;
+ csid_hw->irq_debug_cnt = 0;
+
+ CAM_DBG(CAM_ISP, "CSID:%d res_type :%d res_id:%d",
+ csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+ switch (res->res_type) {
+ case CAM_ISP_RESOURCE_CID:
+ if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
+ rc = cam_ife_csid_tpg_start(csid_hw, res);
+ break;
+ case CAM_ISP_RESOURCE_PIX_PATH:
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP ||
+ res->res_id == CAM_IFE_PIX_PATH_RES_PPP)
+ rc = cam_ife_csid_enable_pxl_path(csid_hw, res);
+ else
+ rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type);
+ /* Fix: propagate the failure; previously this returned 0 */
+ rc = -EINVAL;
+ break;
+ }
+end:
+ return rc;
+}
+
+/*
+ * cam_ife_csid_stop() - stop a set of CSID resources.
+ *
+ * First pass stops every resource (TPG for a CID when the TPG is the
+ * input, otherwise the pixel/RDI path with the requested stop_cmd);
+ * second pass moves every node back to INIT_HW state.
+ *
+ * NOTE(review): rc only reflects the last resource processed, so an
+ * earlier failure can be overwritten by a later success - confirm
+ * whether callers need the aggregate status.
+ */
+static int cam_ife_csid_stop(void *hw_priv,
+ void *stop_args, uint32_t arg_size)
+{
+ int rc = 0;
+ struct cam_ife_csid_hw *csid_hw;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_isp_resource_node *res;
+ struct cam_csid_hw_stop_args *csid_stop;
+ uint32_t i;
+
+ if (!hw_priv || !stop_args ||
+ (arg_size != sizeof(struct cam_csid_hw_stop_args))) {
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
+ return -EINVAL;
+ }
+ csid_stop = (struct cam_csid_hw_stop_args *) stop_args;
+
+ if (!csid_stop->num_res) {
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
+ return -EINVAL;
+ }
+
+ csid_hw_info = (struct cam_hw_info *)hw_priv;
+ csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
+ CAM_DBG(CAM_ISP, "CSID:%d num_res %d",
+ csid_hw->hw_intf->hw_idx,
+ csid_stop->num_res);
+
+ /* Stop the resource first */
+ for (i = 0; i < csid_stop->num_res; i++) {
+ res = csid_stop->node_res[i];
+ CAM_DBG(CAM_ISP, "CSID:%d res_type %d res_id %d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type, res->res_id);
+ switch (res->res_type) {
+ case CAM_ISP_RESOURCE_CID:
+ if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
+ rc = cam_ife_csid_tpg_stop(csid_hw, res);
+ break;
+ case CAM_ISP_RESOURCE_PIX_PATH:
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP ||
+ res->res_id == CAM_IFE_PIX_PATH_RES_PPP)
+ rc = cam_ife_csid_disable_pxl_path(csid_hw,
+ res, csid_stop->stop_cmd)
+ else
+ rc = cam_ife_csid_disable_rdi_path(csid_hw,
+ res, csid_stop->stop_cmd);
+
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+ csid_hw->hw_intf->hw_idx,
+ res->res_type);
+ break;
+ }
+ }
+
+ /* All stopped: mark every node as (re)initializable again */
+ for (i = 0; i < csid_stop->num_res; i++) {
+ res = csid_stop->node_res[i];
+ res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+ }
+
+ CAM_DBG(CAM_ISP, "%s: Exit\n", __func__);
+
+ return rc;
+
+}
+
+/* Generic HW-interface read op: not supported for CSID, always -EINVAL. */
+static int cam_ife_csid_read(void *hw_priv,
+ void *read_args, uint32_t arg_size)
+{
+ CAM_ERR(CAM_ISP, "CSID: un supported");
+
+ return -EINVAL;
+}
+
+/* Generic HW-interface write op: not supported for CSID, always -EINVAL. */
+static int cam_ife_csid_write(void *hw_priv,
+ void *write_args, uint32_t arg_size)
+{
+ CAM_ERR(CAM_ISP, "CSID: un supported");
+ return -EINVAL;
+}
+
+/*
+ * cam_ife_csid_sof_irq_debug() - enable/disable debug SOF IRQs.
+ *
+ * cmd_args points to a uint32_t: 1 enables, anything else disables.
+ * For the IPP path and every RDI whose IRQ mask is currently non-zero
+ * (i.e. an active path), the INPUT_SOF bit is set/cleared; csid_debug
+ * and sof_irq_triggered (used by the IRQ handler's SOF-freeze counter)
+ * are updated to match. A powered-down CSID is skipped (returns 0).
+ *
+ * NOTE(review): the PPP path's IRQ mask is not touched here, unlike
+ * IPP and the RDIs - confirm whether that is intentional.
+ */
+static int cam_ife_csid_sof_irq_debug(
+ struct cam_ife_csid_hw *csid_hw, void *cmd_args)
+{
+ int i = 0;
+ uint32_t val = 0;
+ bool sof_irq_enable = false;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ struct cam_hw_soc_info *soc_info;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ if (*((uint32_t *)cmd_args) == 1)
+ sof_irq_enable = true;
+
+ if (csid_hw->hw_info->hw_state ==
+ CAM_HW_STATE_POWER_DOWN) {
+ CAM_WARN(CAM_ISP,
+ "CSID powered down unable to %s sof irq",
+ (sof_irq_enable == true) ? "enable" : "disable");
+ return 0;
+ }
+
+ if (csid_reg->ipp_reg) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+
+ /* A zero mask means the path is inactive; leave it alone */
+ if (val) {
+ if (sof_irq_enable)
+ val |= CSID_PATH_INFO_INPUT_SOF;
+ else
+ val &= ~CSID_PATH_INFO_INPUT_SOF;
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+ val = 0;
+ }
+ }
+
+ for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+ if (val) {
+ if (sof_irq_enable)
+ val |= CSID_PATH_INFO_INPUT_SOF;
+ else
+ val &= ~CSID_PATH_INFO_INPUT_SOF;
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+ val = 0;
+ }
+ }
+
+ if (sof_irq_enable) {
+ csid_hw->csid_debug |= CSID_DEBUG_ENABLE_SOF_IRQ;
+ csid_hw->sof_irq_triggered = true;
+ } else {
+ csid_hw->csid_debug &= ~CSID_DEBUG_ENABLE_SOF_IRQ;
+ csid_hw->sof_irq_triggered = false;
+ }
+
+ CAM_INFO(CAM_ISP, "SOF freeze: CSID SOF irq %s",
+ (sof_irq_enable == true) ? "enabled" : "disabled");
+
+ return 0;
+}
+
+/*
+ * cam_ife_csid_set_csid_clock() - cache the requested CSID clock rate.
+ *
+ * Only stores clk_update->clk_rate into csid_hw->clk_rate; the clock
+ * itself is presumably applied elsewhere (e.g. at enable time) -
+ * confirm against the SOC enable path. cmd_args is validated by the
+ * caller (process_cmd). Returns 0, or -EINVAL on a NULL csid_hw.
+ */
+static int cam_ife_csid_set_csid_clock(
+ struct cam_ife_csid_hw *csid_hw, void *cmd_args)
+{
+ struct cam_ife_csid_clock_update_args *clk_update = NULL;
+
+ if (!csid_hw)
+ return -EINVAL;
+
+ clk_update =
+ (struct cam_ife_csid_clock_update_args *)cmd_args;
+
+ csid_hw->clk_rate = clk_update->clk_rate;
+ CAM_INFO(CAM_ISP, "CSID clock rate %llu", csid_hw->clk_rate);
+
+ return 0;
+}
+
+/*
+ * cam_ife_csid_process_cmd() - dispatch private HW-manager commands.
+ *
+ * Supported commands: SOF timestamp query (optionally also logging
+ * HBI/VBI when the corresponding debug bit is set), debug-mask update,
+ * SOF IRQ debug toggle, and clock-rate update.
+ * Returns the handler's result, or -EINVAL for unknown commands or
+ * NULL arguments.
+ */
+static int cam_ife_csid_process_cmd(void *hw_priv,
+ uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+ int rc = 0;
+ struct cam_ife_csid_hw *csid_hw;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_isp_resource_node *res = NULL;
+
+ if (!hw_priv || !cmd_args) {
+ CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
+ return -EINVAL;
+ }
+
+ csid_hw_info = (struct cam_hw_info *)hw_priv;
+ csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
+
+ switch (cmd_type) {
+ case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
+ rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+ res = ((struct cam_csid_get_time_stamp_args *)
+ cmd_args)->node_res;
+ cam_ife_csid_get_hbi_vbi(csid_hw, res);
+ }
+ break;
+ case CAM_IFE_CSID_SET_CSID_DEBUG:
+ rc = cam_ife_csid_set_csid_debug(csid_hw, cmd_args);
+ break;
+ case CAM_IFE_CSID_SOF_IRQ_DEBUG:
+ rc = cam_ife_csid_sof_irq_debug(csid_hw, cmd_args);
+ break;
+ case CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE:
+ rc = cam_ife_csid_set_csid_clock(csid_hw, cmd_args);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "CSID:%d unsupported cmd:%d",
+ csid_hw->hw_intf->hw_idx, cmd_type);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+
+}
+
+/*
+ * cam_ife_csid_irq() - top-half IRQ handler for the CSID.
+ *
+ * Reads then clears top/RX/IPP/PPP/RDI status registers, and services:
+ * reset completions (completing the matching waiters), fatal CSI2 RX
+ * errors (halting the CSI2 RX on FIFO overflow or repeated PHY errors),
+ * optional debug logging (EOT/SOT/SOF/EOF, long/short/CPHY packet
+ * captures), and per-path FIFO overflows (halting that path
+ * immediately). Always returns IRQ_HANDLED.
+ *
+ * Fix: the original dereferenced 'data' (for the first CAM_DBG print)
+ * before the NULL check; the check is now performed first.
+ */
+irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
+{
+ struct cam_ife_csid_hw *csid_hw;
+ struct cam_hw_soc_info *soc_info;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ const struct cam_ife_csid_csi2_rx_reg_offset *csi2_reg;
+ uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0;
+ uint32_t irq_status_rdi[4] = {0, 0, 0, 0};
+ uint32_t val, irq_status_ppp = 0;
+ bool fatal_err_detected = false;
+ uint32_t sof_irq_debug_en = 0;
+
+ csid_hw = (struct cam_ife_csid_hw *)data;
+
+ if (!csid_hw) {
+ CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
+ return IRQ_HANDLED;
+ }
+
+ CAM_DBG(CAM_ISP, "CSID %d IRQ Handling", csid_hw->hw_intf->hw_idx);
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+ csi2_reg = csid_reg->csi2_reg;
+
+ /* read */
+ irq_status_top = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_top_irq_status_addr);
+
+ irq_status_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr);
+
+ if (csid_reg->cmn_reg->num_pix)
+ irq_status_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_irq_status_addr);
+
+ if (csid_reg->cmn_reg->num_ppp)
+ irq_status_ppp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_irq_status_addr);
+
+
+ for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+ irq_status_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr);
+
+ /* clear */
+ cam_io_w_mb(irq_status_rx, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+ if (csid_reg->cmn_reg->num_pix)
+ cam_io_w_mb(irq_status_ipp, soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
+
+ if (csid_reg->cmn_reg->num_ppp)
+ cam_io_w_mb(irq_status_ppp, soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_irq_clear_addr);
+
+ for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+ cam_io_w_mb(irq_status_rdi[i], soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+ }
+ /* Latch all the clears written above into effect */
+ cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+ csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+ CAM_DBG(CAM_ISP, "irq_status_top = 0x%x", irq_status_top);
+ CAM_DBG(CAM_ISP, "irq_status_rx = 0x%x", irq_status_rx);
+ CAM_DBG(CAM_ISP, "irq_status_ipp = 0x%x", irq_status_ipp);
+ CAM_DBG(CAM_ISP, "irq_status_ppp = 0x%x", irq_status_ppp);
+ CAM_DBG(CAM_ISP, "irq_status_rdi0= 0x%x", irq_status_rdi[0]);
+ CAM_DBG(CAM_ISP, "irq_status_rdi1= 0x%x", irq_status_rdi[1]);
+ CAM_DBG(CAM_ISP, "irq_status_rdi2= 0x%x", irq_status_rdi[2]);
+
+ if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
+ CAM_DBG(CAM_ISP, "csi rx reset complete");
+ complete(&csid_hw->csid_csi2_complete);
+ }
+
+ /* Lane FIFO / TG overflows are unrecoverable: halt the CSI2 RX */
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 over flow",
+ csid_hw->hw_intf->hw_idx);
+ fatal_err_detected = true;
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 over flow",
+ csid_hw->hw_intf->hw_idx);
+ fatal_err_detected = true;
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 over flow",
+ csid_hw->hw_intf->hw_idx);
+ fatal_err_detected = true;
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 over flow",
+ csid_hw->hw_intf->hw_idx);
+ fatal_err_detected = true;
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER FLOW",
+ csid_hw->hw_intf->hw_idx);
+ fatal_err_detected = true;
+ }
+ /* PHY reception errors become fatal only when they keep repeating */
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_EOT_RECEPTION",
+ csid_hw->hw_intf->hw_idx);
+ csid_hw->error_irq_count++;
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_SOT_RECEPTION",
+ csid_hw->hw_intf->hw_idx);
+ csid_hw->error_irq_count++;
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PH_CRC",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_CRC) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_CRC",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_ECC) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_ECC",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_MMAPPED_VC_DT) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d MMAPPED_VC_DT",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_STREAM_UNDERFLOW",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d UNBOUNDED_FRAME",
+ csid_hw->hw_intf->hw_idx);
+ }
+
+ if (csid_hw->error_irq_count >
+ CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT) {
+ fatal_err_detected = true;
+ csid_hw->error_irq_count = 0;
+ }
+
+ if (fatal_err_detected)
+ cam_ife_csid_halt_csi2(csid_hw);
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ) {
+ if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL0_EOT_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL1_EOT_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL2_EOT_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL3_EOT_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ }
+ }
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ) {
+ if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL0_SOT_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL1_SOT_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL2_SOT_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ }
+ if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PHY_DL3_SOT_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ }
+ }
+
+ if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE) &&
+ (irq_status_rx & CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED)) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d LONG_PKT_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csi2_reg->csid_csi2_rx_captured_long_pkt_0_addr);
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d long packet VC :%d DT:%d WC:%d",
+ csid_hw->hw_intf->hw_idx,
+ (val >> 22), ((val >> 16) & 0x3F), (val & 0xFFFF));
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csi2_reg->csid_csi2_rx_captured_long_pkt_1_addr);
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d long packet ECC :%d",
+ csid_hw->hw_intf->hw_idx, val);
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csi2_reg->csid_csi2_rx_captured_long_pkt_ftr_addr);
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d long pkt cal CRC:%d expected CRC:%d",
+ csid_hw->hw_intf->hw_idx, (val >> 16), (val & 0xFFFF));
+ }
+ if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) &&
+ (irq_status_rx & CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED)) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d SHORT_PKT_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csi2_reg->csid_csi2_rx_captured_short_pkt_0_addr);
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d short pkt VC :%d DT:%d LC:%d",
+ csid_hw->hw_intf->hw_idx,
+ (val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF));
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csi2_reg->csid_csi2_rx_captured_short_pkt_1_addr);
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d short packet ECC :%d",
+ csid_hw->hw_intf->hw_idx, val);
+ }
+
+ if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE) &&
+ (irq_status_rx & CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED)) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PKT_HDR_CAPTURED",
+ csid_hw->hw_intf->hw_idx);
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csi2_reg->csid_csi2_rx_captured_cphy_pkt_hdr_addr);
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID:%d cphy packet VC :%d DT:%d WC:%d",
+ csid_hw->hw_intf->hw_idx,
+ (val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF));
+ }
+
+ /*read the IPP errors */
+ if (csid_reg->cmn_reg->num_pix) {
+ /* IPP reset done bit */
+ if (irq_status_ipp &
+ BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+ CAM_DBG(CAM_ISP, "CSID IPP reset complete");
+ complete(&csid_hw->csid_ipp_complete);
+ }
+
+ if ((irq_status_ipp & CSID_PATH_INFO_INPUT_SOF) &&
+ (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP SOF received",
+ csid_hw->hw_intf->hw_idx);
+ if (csid_hw->sof_irq_triggered)
+ csid_hw->irq_debug_cnt++;
+ }
+
+ if ((irq_status_ipp & CSID_PATH_INFO_INPUT_EOF) &&
+ (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received",
+ csid_hw->hw_intf->hw_idx);
+
+ if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d IPP fifo over flow",
+ csid_hw->hw_intf->hw_idx);
+ /* Stop IPP path immediately */
+ cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_pxl_ctrl_addr);
+ }
+ }
+
+ /*read PPP errors */
+ if (csid_reg->cmn_reg->num_ppp) {
+ /* PPP reset done bit */
+ if (irq_status_ppp &
+ BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+ CAM_DBG(CAM_ISP, "CSID PPP reset complete");
+ complete(&csid_hw->csid_ppp_complete);
+ }
+
+ if ((irq_status_ppp & CSID_PATH_INFO_INPUT_SOF) &&
+ (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP SOF received",
+ csid_hw->hw_intf->hw_idx);
+ if (csid_hw->sof_irq_triggered)
+ csid_hw->irq_debug_cnt++;
+ }
+
+ if ((irq_status_ppp & CSID_PATH_INFO_INPUT_EOF) &&
+ (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP EOF received",
+ csid_hw->hw_intf->hw_idx);
+
+ if (irq_status_ppp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d PPP fifo over flow",
+ csid_hw->hw_intf->hw_idx);
+ /* Stop PPP path immediately */
+ cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->ppp_reg->csid_pxl_ctrl_addr);
+ }
+ }
+
+ for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+ if (irq_status_rdi[i] &
+ BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+ CAM_DBG(CAM_ISP, "CSID RDI%d reset complete", i);
+ complete(&csid_hw->csid_rdin_complete[i]);
+ }
+
+ if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF) &&
+ (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID RDI:%d SOF received", i);
+ if (csid_hw->sof_irq_triggered)
+ csid_hw->irq_debug_cnt++;
+ }
+
+ if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF) &&
+ (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
+ CAM_INFO_RATE_LIMIT(CAM_ISP,
+ "CSID RDI:%d EOF received", i);
+
+ if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "CSID:%d RDI fifo over flow",
+ csid_hw->hw_intf->hw_idx);
+ /* Stop RDI path immediately */
+ cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
+ }
+ }
+
+ /* Enough debug SOFs seen: turn the debug SOF IRQs back off */
+ if (csid_hw->irq_debug_cnt >= CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX) {
+ cam_ife_csid_sof_irq_debug(csid_hw, &sof_irq_debug_en);
+ csid_hw->irq_debug_cnt = 0;
+ }
+
+ CAM_DBG(CAM_ISP, "IRQ Handling exit");
+ return IRQ_HANDLED;
+}
+
+int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
+ uint32_t csid_idx)
+{
+ int rc = -EINVAL;
+ uint32_t i;
+ uint32_t num_paths;
+ struct cam_ife_csid_path_cfg *path_data;
+ struct cam_ife_csid_cid_data *cid_data;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_ife_csid_hw *ife_csid_hw = NULL;
+
+ if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
+ CAM_ERR(CAM_ISP, "Invalid csid index:%d", csid_idx);
+ return rc;
+ }
+
+ csid_hw_info = (struct cam_hw_info *) csid_hw_intf->hw_priv;
+ ife_csid_hw = (struct cam_ife_csid_hw *) csid_hw_info->core_info;
+
+ ife_csid_hw->hw_intf = csid_hw_intf;
+ ife_csid_hw->hw_info = csid_hw_info;
+
+ CAM_DBG(CAM_ISP, "type %d index %d",
+ ife_csid_hw->hw_intf->hw_type, csid_idx);
+
+
+ ife_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+ mutex_init(&ife_csid_hw->hw_info->hw_mutex);
+ spin_lock_init(&ife_csid_hw->hw_info->hw_lock);
+ init_completion(&ife_csid_hw->hw_info->hw_complete);
+
+ init_completion(&ife_csid_hw->csid_top_complete);
+ init_completion(&ife_csid_hw->csid_csi2_complete);
+ init_completion(&ife_csid_hw->csid_ipp_complete);
+ init_completion(&ife_csid_hw->csid_ppp_complete);
+ for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++)
+ init_completion(&ife_csid_hw->csid_rdin_complete[i]);
+
+
+ rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
+ cam_ife_csid_irq, ife_csid_hw);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP, "CSID:%d Failed to init_soc", csid_idx);
+ goto err;
+ }
+
+ ife_csid_hw->hw_intf->hw_ops.get_hw_caps = cam_ife_csid_get_hw_caps;
+ ife_csid_hw->hw_intf->hw_ops.init = cam_ife_csid_init_hw;
+ ife_csid_hw->hw_intf->hw_ops.deinit = cam_ife_csid_deinit_hw;
+ ife_csid_hw->hw_intf->hw_ops.reset = cam_ife_csid_reset;
+ ife_csid_hw->hw_intf->hw_ops.reserve = cam_ife_csid_reserve;
+ ife_csid_hw->hw_intf->hw_ops.release = cam_ife_csid_release;
+ ife_csid_hw->hw_intf->hw_ops.start = cam_ife_csid_start;
+ ife_csid_hw->hw_intf->hw_ops.stop = cam_ife_csid_stop;
+ ife_csid_hw->hw_intf->hw_ops.read = cam_ife_csid_read;
+ ife_csid_hw->hw_intf->hw_ops.write = cam_ife_csid_write;
+ ife_csid_hw->hw_intf->hw_ops.process_cmd = cam_ife_csid_process_cmd;
+
+ num_paths = ife_csid_hw->csid_info->csid_reg->cmn_reg->num_pix +
+ ife_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+ /* Initialize the CID resource */
+ for (i = 0; i < num_paths; i++) {
+ ife_csid_hw->cid_res[i].res_type = CAM_ISP_RESOURCE_CID;
+ ife_csid_hw->cid_res[i].res_id = i;
+ ife_csid_hw->cid_res[i].res_state =
+ CAM_ISP_RESOURCE_STATE_AVAILABLE;
+ ife_csid_hw->cid_res[i].hw_intf = ife_csid_hw->hw_intf;
+
+ cid_data = kzalloc(sizeof(struct cam_ife_csid_cid_data),
+ GFP_KERNEL);
+ if (!cid_data) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ ife_csid_hw->cid_res[i].res_priv = cid_data;
+ }
+
+ /* Initialize the IPP resources */
+ if (ife_csid_hw->csid_info->csid_reg->cmn_reg->num_pix) {
+ ife_csid_hw->ipp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+ ife_csid_hw->ipp_res.res_id = CAM_IFE_PIX_PATH_RES_IPP;
+ ife_csid_hw->ipp_res.res_state =
+ CAM_ISP_RESOURCE_STATE_AVAILABLE;
+ ife_csid_hw->ipp_res.hw_intf = ife_csid_hw->hw_intf;
+ path_data = kzalloc(sizeof(*path_data),
+ GFP_KERNEL);
+ if (!path_data) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ ife_csid_hw->ipp_res.res_priv = path_data;
+ }
+
+ /* Initialize PPP resource */
+ if (ife_csid_hw->csid_info->csid_reg->cmn_reg->num_ppp) {
+ ife_csid_hw->ppp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+ ife_csid_hw->ppp_res.res_id = CAM_IFE_PIX_PATH_RES_PPP;
+ ife_csid_hw->ppp_res.res_state =
+ CAM_ISP_RESOURCE_STATE_AVAILABLE;
+ ife_csid_hw->ppp_res.hw_intf = ife_csid_hw->hw_intf;
+ path_data = kzalloc(sizeof(*path_data),
+ GFP_KERNEL);
+ if (!path_data) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ ife_csid_hw->ppp_res.res_priv = path_data;
+ }
+
+ /* Initialize the RDI resource */
+ for (i = 0; i < ife_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+ i++) {
+ /* res type is from RDI 0 to RDI3 */
+ ife_csid_hw->rdi_res[i].res_type =
+ CAM_ISP_RESOURCE_PIX_PATH;
+ ife_csid_hw->rdi_res[i].res_id = i;
+ ife_csid_hw->rdi_res[i].res_state =
+ CAM_ISP_RESOURCE_STATE_AVAILABLE;
+ ife_csid_hw->rdi_res[i].hw_intf = ife_csid_hw->hw_intf;
+
+ path_data = kzalloc(sizeof(*path_data),
+ GFP_KERNEL);
+ if (!path_data) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ ife_csid_hw->rdi_res[i].res_priv = path_data;
+ }
+
+ ife_csid_hw->csid_debug = 0;
+ ife_csid_hw->error_irq_count = 0;
+
+ return 0;
+err:
+ if (rc) {
+ kfree(ife_csid_hw->ipp_res.res_priv);
+ kfree(ife_csid_hw->ppp_res.res_priv);
+ for (i = 0; i <
+ ife_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+ i++)
+ kfree(ife_csid_hw->rdi_res[i].res_priv);
+
+ for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
+ kfree(ife_csid_hw->cid_res[i].res_priv);
+
+ }
+
+ return rc;
+}
+
+
+int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw)
+{
+ int rc = -EINVAL;
+ uint32_t i;
+
+ if (!ife_csid_hw) {
+ CAM_ERR(CAM_ISP, "Invalid param");
+ return rc;
+ }
+
+ /* release the privdate data memory from resources */
+ kfree(ife_csid_hw->ipp_res.res_priv);
+ kfree(ife_csid_hw->ppp_res.res_priv);
+ for (i = 0; i <
+ ife_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+ i++) {
+ kfree(ife_csid_hw->rdi_res[i].res_priv);
+ }
+ for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
+ kfree(ife_csid_hw->cid_res[i].res_priv);
+
+ cam_ife_csid_deinit_soc_resources(&ife_csid_hw->hw_info->soc_info);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
new file mode 100644
index 000000000000..43fd9b03132e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -0,0 +1,487 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_HW_H_
+#define _CAM_IFE_CSID_HW_H_
+
+#include "cam_hw.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_ife_csid_soc.h"
+
+#define CAM_IFE_CSID_HW_RES_MAX 4
+#define CAM_IFE_CSID_CID_RES_MAX 4
+#define CAM_IFE_CSID_RDI_MAX 4
+
+#define CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED BIT(0)
+#define CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED BIT(1)
+#define CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED BIT(2)
+#define CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED BIT(3)
+#define CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED BIT(4)
+#define CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED BIT(5)
+#define CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED BIT(6)
+#define CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED BIT(7)
+#define CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED BIT(8)
+#define CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED BIT(9)
+#define CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED BIT(10)
+#define CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION BIT(11)
+#define CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION BIT(12)
+#define CSID_CSI2_RX_ERROR_CPHY_PH_CRC BIT(13)
+#define CSID_CSI2_RX_WARNING_ECC BIT(14)
+#define CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW BIT(15)
+#define CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW BIT(16)
+#define CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW BIT(17)
+#define CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW BIT(18)
+#define CSID_CSI2_RX_ERROR_CRC BIT(19)
+#define CSID_CSI2_RX_ERROR_ECC BIT(20)
+#define CSID_CSI2_RX_ERROR_MMAPPED_VC_DT BIT(21)
+#define CSID_CSI2_RX_ERROR_UNMAPPED_VC_DT BIT(22)
+#define CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW BIT(23)
+#define CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME BIT(24)
+#define CSID_CSI2_RX_INFO_TG_DONE BIT(25)
+#define CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW BIT(26)
+#define CSID_CSI2_RX_INFO_RST_DONE BIT(27)
+
+#define CSID_PATH_INFO_RST_DONE BIT(1)
+#define CSID_PATH_ERROR_FIFO_OVERFLOW BIT(2)
+#define CSID_PATH_INFO_SUBSAMPLED_EOF BIT(3)
+#define CSID_PATH_INFO_SUBSAMPLED_SOF BIT(4)
+#define CSID_PATH_INFO_FRAME_DROP_EOF BIT(5)
+#define CSID_PATH_INFO_FRAME_DROP_EOL BIT(6)
+#define CSID_PATH_INFO_FRAME_DROP_SOL BIT(7)
+#define CSID_PATH_INFO_FRAME_DROP_SOF BIT(8)
+#define CSID_PATH_INFO_INPUT_EOF BIT(9)
+#define CSID_PATH_INFO_INPUT_EOL BIT(10)
+#define CSID_PATH_INFO_INPUT_SOL BIT(11)
+#define CSID_PATH_INFO_INPUT_SOF BIT(12)
+#define CSID_PATH_ERROR_PIX_COUNT BIT(13)
+#define CSID_PATH_ERROR_LINE_COUNT BIT(14)
+
+/*
+ * Debug values enable the corresponding interrupts and debug logs provide
+ * necessary information
+ */
+#define CSID_DEBUG_ENABLE_SOF_IRQ BIT(0)
+#define CSID_DEBUG_ENABLE_EOF_IRQ BIT(1)
+#define CSID_DEBUG_ENABLE_SOT_IRQ BIT(2)
+#define CSID_DEBUG_ENABLE_EOT_IRQ BIT(3)
+#define CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE BIT(4)
+#define CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE BIT(5)
+#define CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE BIT(6)
+#define CSID_DEBUG_ENABLE_HBI_VBI_INFO BIT(7)
+#define CSID_DEBUG_DISABLE_EARLY_EOF BIT(8)
+
+/* enum cam_csid_path_halt_mode select the path halt mode control */
+enum cam_csid_path_halt_mode {
+ CSID_HALT_MODE_INTERNAL,
+ CSID_HALT_MODE_GLOBAL,
+ CSID_HALT_MODE_MASTER,
+ CSID_HALT_MODE_SLAVE,
+};
+
+/**
+ *enum cam_csid_path_timestamp_stb_sel - select the sof/eof strobes used to
+ * capture the timestamp
+ */
+enum cam_csid_path_timestamp_stb_sel {
+ CSID_TIMESTAMP_STB_PRE_HALT,
+ CSID_TIMESTAMP_STB_POST_HALT,
+ CSID_TIMESTAMP_STB_POST_IRQ,
+ CSID_TIMESTAMP_STB_MAX,
+};
+
+struct cam_ife_csid_pxl_reg_offset {
+ /* Pxl path register offsets*/
+ uint32_t csid_pxl_irq_status_addr;
+ uint32_t csid_pxl_irq_mask_addr;
+ uint32_t csid_pxl_irq_clear_addr;
+ uint32_t csid_pxl_irq_set_addr;
+
+ uint32_t csid_pxl_cfg0_addr;
+ uint32_t csid_pxl_cfg1_addr;
+ uint32_t csid_pxl_ctrl_addr;
+ uint32_t csid_pxl_frm_drop_pattern_addr;
+ uint32_t csid_pxl_frm_drop_period_addr;
+ uint32_t csid_pxl_irq_subsample_pattern_addr;
+ uint32_t csid_pxl_irq_subsample_period_addr;
+ uint32_t csid_pxl_hcrop_addr;
+ uint32_t csid_pxl_vcrop_addr;
+ uint32_t csid_pxl_pix_drop_pattern_addr;
+ uint32_t csid_pxl_pix_drop_period_addr;
+ uint32_t csid_pxl_line_drop_pattern_addr;
+ uint32_t csid_pxl_line_drop_period_addr;
+ uint32_t csid_pxl_rst_strobes_addr;
+ uint32_t csid_pxl_status_addr;
+ uint32_t csid_pxl_misr_val_addr;
+ uint32_t csid_pxl_format_measure_cfg0_addr;
+ uint32_t csid_pxl_format_measure_cfg1_addr;
+ uint32_t csid_pxl_format_measure0_addr;
+ uint32_t csid_pxl_format_measure1_addr;
+ uint32_t csid_pxl_format_measure2_addr;
+ uint32_t csid_pxl_timestamp_curr0_sof_addr;
+ uint32_t csid_pxl_timestamp_curr1_sof_addr;
+ uint32_t csid_pxl_timestamp_perv0_sof_addr;
+ uint32_t csid_pxl_timestamp_perv1_sof_addr;
+ uint32_t csid_pxl_timestamp_curr0_eof_addr;
+ uint32_t csid_pxl_timestamp_curr1_eof_addr;
+ uint32_t csid_pxl_timestamp_perv0_eof_addr;
+ uint32_t csid_pxl_timestamp_perv1_eof_addr;
+
+ /* configuration */
+ uint32_t pix_store_en_shift_val;
+ uint32_t early_eof_en_shift_val;
+};
+
+struct cam_ife_csid_rdi_reg_offset {
+ uint32_t csid_rdi_irq_status_addr;
+ uint32_t csid_rdi_irq_mask_addr;
+ uint32_t csid_rdi_irq_clear_addr;
+ uint32_t csid_rdi_irq_set_addr;
+
+ /*RDI N register address */
+ uint32_t csid_rdi_cfg0_addr;
+ uint32_t csid_rdi_cfg1_addr;
+ uint32_t csid_rdi_ctrl_addr;
+ uint32_t csid_rdi_frm_drop_pattern_addr;
+ uint32_t csid_rdi_frm_drop_period_addr;
+ uint32_t csid_rdi_irq_subsample_pattern_addr;
+ uint32_t csid_rdi_irq_subsample_period_addr;
+ uint32_t csid_rdi_rpp_hcrop_addr;
+ uint32_t csid_rdi_rpp_vcrop_addr;
+ uint32_t csid_rdi_rpp_pix_drop_pattern_addr;
+ uint32_t csid_rdi_rpp_pix_drop_period_addr;
+ uint32_t csid_rdi_rpp_line_drop_pattern_addr;
+ uint32_t csid_rdi_rpp_line_drop_period_addr;
+ uint32_t csid_rdi_yuv_chroma_conversion_addr;
+ uint32_t csid_rdi_rst_strobes_addr;
+ uint32_t csid_rdi_status_addr;
+ uint32_t csid_rdi_misr_val0_addr;
+ uint32_t csid_rdi_misr_val1_addr;
+ uint32_t csid_rdi_misr_val2_addr;
+ uint32_t csid_rdi_misr_val3_addr;
+ uint32_t csid_rdi_format_measure_cfg0_addr;
+ uint32_t csid_rdi_format_measure_cfg1_addr;
+ uint32_t csid_rdi_format_measure0_addr;
+ uint32_t csid_rdi_format_measure1_addr;
+ uint32_t csid_rdi_format_measure2_addr;
+ uint32_t csid_rdi_timestamp_curr0_sof_addr;
+ uint32_t csid_rdi_timestamp_curr1_sof_addr;
+ uint32_t csid_rdi_timestamp_prev0_sof_addr;
+ uint32_t csid_rdi_timestamp_prev1_sof_addr;
+ uint32_t csid_rdi_timestamp_curr0_eof_addr;
+ uint32_t csid_rdi_timestamp_curr1_eof_addr;
+ uint32_t csid_rdi_timestamp_prev0_eof_addr;
+ uint32_t csid_rdi_timestamp_prev1_eof_addr;
+ uint32_t csid_rdi_byte_cntr_ping_addr;
+ uint32_t csid_rdi_byte_cntr_pong_addr;
+};
+
+struct cam_ife_csid_csi2_rx_reg_offset {
+ uint32_t csid_csi2_rx_irq_status_addr;
+ uint32_t csid_csi2_rx_irq_mask_addr;
+ uint32_t csid_csi2_rx_irq_clear_addr;
+ uint32_t csid_csi2_rx_irq_set_addr;
+ uint32_t csid_csi2_rx_cfg0_addr;
+ uint32_t csid_csi2_rx_cfg1_addr;
+ uint32_t csid_csi2_rx_capture_ctrl_addr;
+ uint32_t csid_csi2_rx_rst_strobes_addr;
+ uint32_t csid_csi2_rx_de_scramble_cfg0_addr;
+ uint32_t csid_csi2_rx_de_scramble_cfg1_addr; /* */
+ uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr;
+ uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr;
+ uint32_t csid_csi2_rx_captured_short_pkt_0_addr;
+ uint32_t csid_csi2_rx_captured_short_pkt_1_addr;
+ uint32_t csid_csi2_rx_captured_long_pkt_0_addr;
+ uint32_t csid_csi2_rx_captured_long_pkt_1_addr;
+ uint32_t csid_csi2_rx_captured_long_pkt_ftr_addr;
+ uint32_t csid_csi2_rx_captured_cphy_pkt_hdr_addr;
+ uint32_t csid_csi2_rx_lane0_misr_addr;
+ uint32_t csid_csi2_rx_lane1_misr_addr;
+ uint32_t csid_csi2_rx_lane2_misr_addr;
+ uint32_t csid_csi2_rx_lane3_misr_addr;
+ uint32_t csid_csi2_rx_total_pkts_rcvd_addr;
+ uint32_t csid_csi2_rx_stats_ecc_addr;
+ uint32_t csid_csi2_rx_total_crc_err_addr;
+
+ /*configurations */
+ uint32_t csi2_rst_srb_all;
+ uint32_t csi2_rst_done_shift_val;
+ uint32_t csi2_irq_mask_all;
+ uint32_t csi2_misr_enable_shift_val;
+ uint32_t csi2_vc_mode_shift_val;
+ uint32_t csi2_capture_long_pkt_en_shift;
+ uint32_t csi2_capture_short_pkt_en_shift;
+ uint32_t csi2_capture_cphy_pkt_en_shift;
+ uint32_t csi2_capture_long_pkt_dt_shift;
+ uint32_t csi2_capture_long_pkt_vc_shift;
+ uint32_t csi2_capture_short_pkt_vc_shift;
+ uint32_t csi2_capture_cphy_pkt_dt_shift;
+ uint32_t csi2_capture_cphy_pkt_vc_shift;
+};
+
+struct cam_ife_csid_csi2_tpg_reg_offset {
+ uint32_t csid_tpg_ctrl_addr;
+ uint32_t csid_tpg_vc_cfg0_addr;
+ uint32_t csid_tpg_vc_cfg1_addr;
+ uint32_t csid_tpg_lfsr_seed_addr;
+ uint32_t csid_tpg_dt_n_cfg_0_addr;
+ uint32_t csid_tpg_dt_n_cfg_1_addr;
+ uint32_t csid_tpg_dt_n_cfg_2_addr;
+ uint32_t csid_tpg_color_bars_cfg_addr;
+ uint32_t csid_tpg_color_box_cfg_addr;
+ uint32_t csid_tpg_common_gen_cfg_addr;
+ uint32_t csid_tpg_cgen_n_cfg_addr;
+ uint32_t csid_tpg_cgen_n_x0_addr;
+ uint32_t csid_tpg_cgen_n_x1_addr;
+ uint32_t csid_tpg_cgen_n_x2_addr;
+ uint32_t csid_tpg_cgen_n_xy_addr;
+ uint32_t csid_tpg_cgen_n_y1_addr;
+ uint32_t csid_tpg_cgen_n_y2_addr;
+
+ /*configurations */
+ uint32_t tpg_dtn_cfg_offset;
+ uint32_t tpg_cgen_cfg_offset;
+ uint32_t tpg_cpas_ife_reg_offset;
+
+};
+struct cam_ife_csid_common_reg_offset {
+ /* MIPI CSID registers */
+ uint32_t csid_hw_version_addr;
+ uint32_t csid_cfg0_addr;
+ uint32_t csid_ctrl_addr;
+ uint32_t csid_reset_addr;
+ uint32_t csid_rst_strobes_addr;
+
+ uint32_t csid_test_bus_ctrl_addr;
+ uint32_t csid_top_irq_status_addr;
+ uint32_t csid_top_irq_mask_addr;
+ uint32_t csid_top_irq_clear_addr;
+ uint32_t csid_top_irq_set_addr;
+ uint32_t csid_irq_cmd_addr;
+
+ /*configurations */
+ uint32_t major_version;
+ uint32_t minor_version;
+ uint32_t version_incr;
+ uint32_t num_rdis;
+ uint32_t num_pix;
+ uint32_t num_ppp;
+ uint32_t csid_rst_stb;
+ uint32_t csid_rst_stb_sw_all;
+ uint32_t path_rst_stb_all;
+ uint32_t path_rst_done_shift_val;
+ uint32_t path_en_shift_val;
+ uint32_t dt_id_shift_val;
+ uint32_t vc_shift_val;
+ uint32_t dt_shift_val;
+ uint32_t fmt_shift_val;
+ uint32_t plain_fmt_shit_val;
+ uint32_t crop_v_en_shift_val;
+ uint32_t crop_h_en_shift_val;
+ uint32_t crop_shift;
+ uint32_t ipp_irq_mask_all;
+ uint32_t rdi_irq_mask_all;
+ uint32_t ppp_irq_mask_all;
+ uint32_t measure_en_hbi_vbi_cnt_mask;
+ uint32_t format_measure_en_val;
+};
+
+/**
+ * struct cam_ife_csid_reg_offset- CSID instance register info
+ *
+ * @cmn_reg: csid common registers info
+ * @ipp_reg: ipp register offset information
+ * @ppp_reg: ppp register offset information
+ * @rdi_reg: rdi register offser information
+ *
+ */
+struct cam_ife_csid_reg_offset {
+ const struct cam_ife_csid_common_reg_offset *cmn_reg;
+ const struct cam_ife_csid_csi2_rx_reg_offset *csi2_reg;
+ const struct cam_ife_csid_pxl_reg_offset *ipp_reg;
+ const struct cam_ife_csid_pxl_reg_offset *ppp_reg;
+ const struct cam_ife_csid_rdi_reg_offset *rdi_reg[CAM_IFE_CSID_RDI_MAX];
+ const struct cam_ife_csid_csi2_tpg_reg_offset *tpg_reg;
+};
+
+
+/**
+ * struct cam_ife_csid_hw_info- CSID HW info
+ *
+ * @csid_reg: csid register offsets
+ * @hw_dts_version: HW DTS version
+ * @csid_max_clk: maximim csid clock
+ *
+ */
+struct cam_ife_csid_hw_info {
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ uint32_t hw_dts_version;
+ uint32_t csid_max_clk;
+
+};
+
+
+
+/**
+ * struct cam_ife_csid_csi2_rx_cfg- csid csi2 rx configuration data
+ * @phy_sel: input resource type for sensor only
+ * @lane_type: lane type: c-phy or d-phy
+ * @lane_num : active lane number
+ * @lane_cfg: lane configurations: 4 bits per lane
+ *
+ */
+struct cam_ife_csid_csi2_rx_cfg {
+ uint32_t phy_sel;
+ uint32_t lane_type;
+ uint32_t lane_num;
+ uint32_t lane_cfg;
+};
+
+/**
+ * struct cam_ife_csid_tpg_cfg- csid tpg configuration data
+ * @width: width
+ * @height: height
+ * @test_pattern : pattern
+ * @in_format: decode format
+ * @usage_type: whether dual isp is required
+ *
+ */
+struct cam_ife_csid_tpg_cfg {
+ uint32_t width;
+ uint32_t height;
+ uint32_t test_pattern;
+ uint32_t in_format;
+ uint32_t usage_type;
+};
+
+/**
+ * struct cam_ife_csid_cid_data- cid configuration private data
+ *
+ * @vc: Virtual channel
+ * @dt: Data type
+ * @cnt: Cid resource reference count.
+ * @tpg_set: Tpg used for this cid resource
+ *
+ */
+struct cam_ife_csid_cid_data {
+ uint32_t vc;
+ uint32_t dt;
+ uint32_t cnt;
+ uint32_t tpg_set;
+};
+
+
+/**
+ * struct cam_ife_csid_path_cfg- csid path configuration details. It is stored
+ * as private data for IPP/ RDI paths
+ * @vc : Virtual channel number
+ * @dt : Data type number
+ * @cid cid number, it is same as DT_ID number in HW
+ * @in_format: input decode format
+ * @out_format: output format
+ * @crop_enable: crop is enable or disabled, if enabled
+ * then remaining parameters are valid.
+ * @start_pixel: start pixel
+ * @end_pixel: end_pixel
+ * @width: width
+ * @start_line: start line
+ * @end_line: end_line
+ * @height: heigth
+ * @sync_mode: Applicable for IPP/RDI path reservation
+ * Reserving the path for master IPP or slave IPP
+ * master (set value 1), Slave ( set value 2)
+ * for RDI, set mode to none
+ * @master_idx: For Slave reservation, Give master IFE instance Index.
+ * Slave will synchronize with master Start and stop operations
+ * @clk_rate Clock rate
+ *
+ */
+struct cam_ife_csid_path_cfg {
+ uint32_t vc;
+ uint32_t dt;
+ uint32_t cid;
+ uint32_t in_format;
+ uint32_t out_format;
+ bool crop_enable;
+ uint32_t start_pixel;
+ uint32_t end_pixel;
+ uint32_t width;
+ uint32_t start_line;
+ uint32_t end_line;
+ uint32_t height;
+ enum cam_isp_hw_sync_mode sync_mode;
+ uint32_t master_idx;
+ uint64_t clk_rate;
+};
+
+/**
+ * struct cam_ife_csid_hw- csid hw device resources data
+ *
+ * @hw_intf: contain the csid hw interface information
+ * @hw_info: csid hw device information
+ * @csid_info: csid hw specific information
+ * @res_type: CSID in resource type
+ * @csi2_rx_cfg: Csi2 rx decoder configuration for csid
+ * @tpg_cfg: TPG configuration
+ * @csi2_rx_reserve_cnt: CSI2 reservations count value
+ * @csi2_cfg_cnt: csi2 configuration count
+ * @tpg_start_cnt: tpg start count
+ * @ipp_res: image pixel path resource
+ * @ppp_res: phase pxl path resource
+ * @rdi_res: raw dump image path resources
+ * @cid_res: cid resources state
+ * @csid_top_reset_complete: csid top reset completion
+ * @csid_csi2_reset_complete: csi2 reset completion
+ * @csid_ipp_reset_complete: ipp reset completion
+ * @csid_ppp_complete: ppp reset completion
+ * @csid_rdin_reset_complete: rdi n completion
+ * @csid_debug: csid debug information to enable the SOT, EOT,
+ * SOF, EOF, measure etc in the csid hw
+ * @clk_rate Clock rate
+ * @sof_irq_triggered: Flag is set on receiving event to enable sof irq
+ * incase of SOF freeze.
+ * @irq_debug_cnt: Counter to track sof irq's when above flag is set.
+ * @error_irq_count Error IRQ count, if continuous error irq comes
+ * need to stop the CSID and mask interrupts.
+ *
+ */
+struct cam_ife_csid_hw {
+ struct cam_hw_intf *hw_intf;
+ struct cam_hw_info *hw_info;
+ struct cam_ife_csid_hw_info *csid_info;
+ uint32_t res_type;
+ struct cam_ife_csid_csi2_rx_cfg csi2_rx_cfg;
+ struct cam_ife_csid_tpg_cfg tpg_cfg;
+ uint32_t csi2_reserve_cnt;
+ uint32_t csi2_cfg_cnt;
+ uint32_t tpg_start_cnt;
+ struct cam_isp_resource_node ipp_res;
+ struct cam_isp_resource_node ppp_res;
+ struct cam_isp_resource_node rdi_res[CAM_IFE_CSID_RDI_MAX];
+ struct cam_isp_resource_node cid_res[CAM_IFE_CSID_CID_RES_MAX];
+ struct completion csid_top_complete;
+ struct completion csid_csi2_complete;
+ struct completion csid_ipp_complete;
+ struct completion csid_ppp_complete;
+ struct completion csid_rdin_complete[CAM_IFE_CSID_RDI_MAX];
+ uint64_t csid_debug;
+ uint64_t clk_rate;
+ bool sof_irq_triggered;
+ uint32_t irq_debug_cnt;
+ uint32_t error_irq_count;
+};
+
+int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
+ uint32_t csid_idx);
+
+int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw);
+
+#endif /* _CAM_IFE_CSID_HW_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
new file mode 100644
index 000000000000..2556b65875db
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
@@ -0,0 +1,147 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_debug_util.h"
+
+static struct cam_hw_intf *cam_ife_csid_hw_list[CAM_IFE_CSID_HW_RES_MAX] = {
+ 0, 0, 0, 0};
+
+static char csid_dev_name[8];
+
+int cam_ife_csid_probe(struct platform_device *pdev)
+{
+
+ struct cam_hw_intf *csid_hw_intf;
+ struct cam_hw_info *csid_hw_info;
+ struct cam_ife_csid_hw *csid_dev = NULL;
+ const struct of_device_id *match_dev = NULL;
+ struct cam_ife_csid_hw_info *csid_hw_data = NULL;
+ uint32_t csid_dev_idx;
+ int rc = 0;
+
+ CAM_DBG(CAM_ISP, "probe called");
+
+ csid_hw_intf = kzalloc(sizeof(*csid_hw_intf), GFP_KERNEL);
+ if (!csid_hw_intf) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ csid_hw_info = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!csid_hw_info) {
+ rc = -ENOMEM;
+ goto free_hw_intf;
+ }
+
+ csid_dev = kzalloc(sizeof(struct cam_ife_csid_hw), GFP_KERNEL);
+ if (!csid_dev) {
+ rc = -ENOMEM;
+ goto free_hw_info;
+ }
+
+ /* get ife csid hw index */
+ of_property_read_u32(pdev->dev.of_node, "cell-index", &csid_dev_idx);
+ /* get ife csid hw information */
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev) {
+ CAM_ERR(CAM_ISP, "No matching table for the IFE CSID HW!");
+ rc = -EINVAL;
+ goto free_dev;
+ }
+
+ memset(csid_dev_name, 0, sizeof(csid_dev_name));
+ snprintf(csid_dev_name, sizeof(csid_dev_name),
+ "csid%1u", csid_dev_idx);
+
+ csid_hw_intf->hw_idx = csid_dev_idx;
+ csid_hw_intf->hw_type = CAM_ISP_HW_TYPE_IFE_CSID;
+ csid_hw_intf->hw_priv = csid_hw_info;
+
+ csid_hw_info->core_info = csid_dev;
+ csid_hw_info->soc_info.pdev = pdev;
+ csid_hw_info->soc_info.dev = &pdev->dev;
+ csid_hw_info->soc_info.dev_name = csid_dev_name;
+ csid_hw_info->soc_info.index = csid_dev_idx;
+
+ csid_hw_data = (struct cam_ife_csid_hw_info *)match_dev->data;
+ /* need to setup the pdev before call the ife hw probe init */
+ csid_dev->csid_info = csid_hw_data;
+
+ rc = cam_ife_csid_hw_probe_init(csid_hw_intf, csid_dev_idx);
+ if (rc)
+ goto free_dev;
+
+ platform_set_drvdata(pdev, csid_dev);
+ CAM_DBG(CAM_ISP, "CSID:%d probe successful",
+ csid_hw_intf->hw_idx);
+
+
+ if (csid_hw_intf->hw_idx < CAM_IFE_CSID_HW_RES_MAX)
+ cam_ife_csid_hw_list[csid_hw_intf->hw_idx] = csid_hw_intf;
+ else
+ goto free_dev;
+
+ return 0;
+
+free_dev:
+ kfree(csid_dev);
+free_hw_info:
+ kfree(csid_hw_info);
+free_hw_intf:
+ kfree(csid_hw_intf);
+err:
+ return rc;
+}
+
+int cam_ife_csid_remove(struct platform_device *pdev)
+{
+ struct cam_ife_csid_hw *csid_dev = NULL;
+ struct cam_hw_intf *csid_hw_intf;
+ struct cam_hw_info *csid_hw_info;
+
+ csid_dev = (struct cam_ife_csid_hw *)platform_get_drvdata(pdev);
+ csid_hw_intf = csid_dev->hw_intf;
+ csid_hw_info = csid_dev->hw_info;
+
+ CAM_DBG(CAM_ISP, "CSID:%d remove",
+ csid_dev->hw_intf->hw_idx);
+
+ cam_ife_csid_hw_deinit(csid_dev);
+
+ /*release the csid device memory */
+ kfree(csid_dev);
+ kfree(csid_hw_info);
+ kfree(csid_hw_intf);
+ return 0;
+}
+
+int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw,
+ uint32_t hw_idx)
+{
+ int rc = 0;
+
+ if (cam_ife_csid_hw_list[hw_idx]) {
+ *ife_csid_hw = cam_ife_csid_hw_list[hw_idx];
+ } else {
+ *ife_csid_hw = NULL;
+ rc = -1;
+ }
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h
new file mode 100644
index 000000000000..77c1c0a167af
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_DEV_H_
+#define _CAM_IFE_CSID_DEV_H_
+
+#include "cam_isp_hw.h"
+
+irqreturn_t cam_ife_csid_irq(int irq_num, void *data);
+
+int cam_ife_csid_probe(struct platform_device *pdev);
+int cam_ife_csid_remove(struct platform_device *pdev);
+
+#endif /*_CAM_IFE_CSID_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
new file mode 100644
index 000000000000..6c39bd84e47f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
@@ -0,0 +1,59 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_ife_csid_lite170.h"
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+
+#define CAM_CSID_LITE_DRV_NAME "csid_lite_170"
+#define CAM_CSID_LITE_VERSION_V170 0x10070000
+
+static struct cam_ife_csid_hw_info cam_ife_csid_lite170_hw_info = {
+ .csid_reg = &cam_ife_csid_lite_170_reg_offset,
+ .hw_dts_version = CAM_CSID_LITE_VERSION_V170,
+};
+
+static const struct of_device_id cam_ife_csid_lite170_dt_match[] = {
+ {
+ .compatible = "qcom,csid-lite170",
+ .data = &cam_ife_csid_lite170_hw_info,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cam_ife_csid_lite170_dt_match);
+
+static struct platform_driver cam_ife_csid_lite170_driver = {
+ .probe = cam_ife_csid_probe,
+ .remove = cam_ife_csid_remove,
+ .driver = {
+ .name = CAM_CSID_LITE_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cam_ife_csid_lite170_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_ife_csid_lite170_init_module(void)
+{
+ return platform_driver_register(&cam_ife_csid_lite170_driver);
+}
+
+static void __exit cam_ife_csid_lite170_exit_module(void)
+{
+ platform_driver_unregister(&cam_ife_csid_lite170_driver);
+}
+
+module_init(cam_ife_csid_lite170_init_module);
+module_exit(cam_ife_csid_lite170_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID_LITE170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
new file mode 100644
index 000000000000..4fdc518c0385
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
@@ -0,0 +1,319 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_LITE170_H_
+#define _CAM_IFE_CSID_LITE170_H_
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_rdi_reg_offset
+ cam_ife_csid_lite_170_rdi_0_reg_offset = {
+
+ .csid_rdi_irq_status_addr = 0x30,
+ .csid_rdi_irq_mask_addr = 0x34,
+ .csid_rdi_irq_clear_addr = 0x38,
+ .csid_rdi_irq_set_addr = 0x3c,
+ .csid_rdi_cfg0_addr = 0x200,
+ .csid_rdi_cfg1_addr = 0x204,
+ .csid_rdi_ctrl_addr = 0x208,
+ .csid_rdi_frm_drop_pattern_addr = 0x20c,
+ .csid_rdi_frm_drop_period_addr = 0x210,
+ .csid_rdi_irq_subsample_pattern_addr = 0x214,
+ .csid_rdi_irq_subsample_period_addr = 0x218,
+ .csid_rdi_rpp_hcrop_addr = 0x21c,
+ .csid_rdi_rpp_vcrop_addr = 0x220,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x224,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x228,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x22c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x230,
+ .csid_rdi_rst_strobes_addr = 0x240,
+ .csid_rdi_status_addr = 0x250,
+ .csid_rdi_misr_val0_addr = 0x254,
+ .csid_rdi_misr_val1_addr = 0x258,
+ .csid_rdi_misr_val2_addr = 0x25c,
+ .csid_rdi_misr_val3_addr = 0x260,
+ .csid_rdi_format_measure_cfg0_addr = 0x270,
+ .csid_rdi_format_measure_cfg1_addr = 0x274,
+ .csid_rdi_format_measure0_addr = 0x278,
+ .csid_rdi_format_measure1_addr = 0x27c,
+ .csid_rdi_format_measure2_addr = 0x280,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x290,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x294,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x298,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x29c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x2a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x2a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x2a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x2ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x2e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x2e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+ cam_ife_csid_lite_170_rdi_1_reg_offset = {
+
+ .csid_rdi_irq_status_addr = 0x40,
+ .csid_rdi_irq_mask_addr = 0x44,
+ .csid_rdi_irq_clear_addr = 0x48,
+ .csid_rdi_irq_set_addr = 0x4c,
+ .csid_rdi_cfg0_addr = 0x300,
+ .csid_rdi_cfg1_addr = 0x304,
+ .csid_rdi_ctrl_addr = 0x308,
+ .csid_rdi_frm_drop_pattern_addr = 0x30c,
+ .csid_rdi_frm_drop_period_addr = 0x310,
+ .csid_rdi_irq_subsample_pattern_addr = 0x314,
+ .csid_rdi_irq_subsample_period_addr = 0x318,
+ .csid_rdi_rpp_hcrop_addr = 0x31c,
+ .csid_rdi_rpp_vcrop_addr = 0x320,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x324,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x328,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x32c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x330,
+ .csid_rdi_rst_strobes_addr = 0x340,
+ .csid_rdi_status_addr = 0x350,
+ .csid_rdi_misr_val0_addr = 0x354,
+ .csid_rdi_misr_val1_addr = 0x358,
+ .csid_rdi_misr_val2_addr = 0x35c,
+ .csid_rdi_misr_val3_addr = 0x360,
+ .csid_rdi_format_measure_cfg0_addr = 0x370,
+ .csid_rdi_format_measure_cfg1_addr = 0x374,
+ .csid_rdi_format_measure0_addr = 0x378,
+ .csid_rdi_format_measure1_addr = 0x37c,
+ .csid_rdi_format_measure2_addr = 0x380,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x390,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x394,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x398,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x39c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x3a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x3a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x3a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x3ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x3e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x3e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+ cam_ife_csid_lite_170_rdi_2_reg_offset = {
+
+ .csid_rdi_irq_status_addr = 0x50,
+ .csid_rdi_irq_mask_addr = 0x54,
+ .csid_rdi_irq_clear_addr = 0x58,
+ .csid_rdi_irq_set_addr = 0x5c,
+ .csid_rdi_cfg0_addr = 0x400,
+ .csid_rdi_cfg1_addr = 0x404,
+ .csid_rdi_ctrl_addr = 0x408,
+ .csid_rdi_frm_drop_pattern_addr = 0x40c,
+ .csid_rdi_frm_drop_period_addr = 0x410,
+ .csid_rdi_irq_subsample_pattern_addr = 0x414,
+ .csid_rdi_irq_subsample_period_addr = 0x418,
+ .csid_rdi_rpp_hcrop_addr = 0x41c,
+ .csid_rdi_rpp_vcrop_addr = 0x420,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x424,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x428,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x42c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x430,
+ .csid_rdi_yuv_chroma_conversion_addr = 0x434,
+ .csid_rdi_rst_strobes_addr = 0x440,
+ .csid_rdi_status_addr = 0x450,
+ .csid_rdi_misr_val0_addr = 0x454,
+ .csid_rdi_misr_val1_addr = 0x458,
+ .csid_rdi_misr_val2_addr = 0x45c,
+ .csid_rdi_misr_val3_addr = 0x460,
+ .csid_rdi_format_measure_cfg0_addr = 0x470,
+ .csid_rdi_format_measure_cfg1_addr = 0x474,
+ .csid_rdi_format_measure0_addr = 0x478,
+ .csid_rdi_format_measure1_addr = 0x47c,
+ .csid_rdi_format_measure2_addr = 0x480,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x490,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x494,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x498,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x49c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x4a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x4a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x4a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x4ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x4e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x4e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+ cam_ife_csid_lite_170_rdi_3_reg_offset = {
+
+ .csid_rdi_irq_status_addr = 0x60,
+ .csid_rdi_irq_mask_addr = 0x64,
+ .csid_rdi_irq_clear_addr = 0x68,
+ .csid_rdi_irq_set_addr = 0x6c,
+ .csid_rdi_cfg0_addr = 0x500,
+ .csid_rdi_cfg1_addr = 0x504,
+ .csid_rdi_ctrl_addr = 0x508,
+ .csid_rdi_frm_drop_pattern_addr = 0x50c,
+ .csid_rdi_frm_drop_period_addr = 0x510,
+ .csid_rdi_irq_subsample_pattern_addr = 0x514,
+ .csid_rdi_irq_subsample_period_addr = 0x518,
+ .csid_rdi_rpp_hcrop_addr = 0x51c,
+ .csid_rdi_rpp_vcrop_addr = 0x520,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x524,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x528,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x52c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x530,
+ .csid_rdi_yuv_chroma_conversion_addr = 0x534,
+ .csid_rdi_rst_strobes_addr = 0x540,
+ .csid_rdi_status_addr = 0x550,
+ .csid_rdi_misr_val0_addr = 0x554,
+ .csid_rdi_misr_val1_addr = 0x558,
+ .csid_rdi_misr_val2_addr = 0x55c,
+ .csid_rdi_misr_val3_addr = 0x560,
+ .csid_rdi_format_measure_cfg0_addr = 0x570,
+ .csid_rdi_format_measure_cfg1_addr = 0x574,
+ .csid_rdi_format_measure0_addr = 0x578,
+ .csid_rdi_format_measure1_addr = 0x57c,
+ .csid_rdi_format_measure2_addr = 0x580,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x590,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x594,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x598,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x59c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x5a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x5a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x5a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x5ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x5e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x5e4,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+ cam_ife_csid_lite_170_csi2_reg_offset = {
+
+ .csid_csi2_rx_irq_status_addr = 0x20,
+ .csid_csi2_rx_irq_mask_addr = 0x24,
+ .csid_csi2_rx_irq_clear_addr = 0x28,
+ .csid_csi2_rx_irq_set_addr = 0x2c,
+
+ /*CSI2 rx control */
+ .csid_csi2_rx_cfg0_addr = 0x100,
+ .csid_csi2_rx_cfg1_addr = 0x104,
+ .csid_csi2_rx_capture_ctrl_addr = 0x108,
+ .csid_csi2_rx_rst_strobes_addr = 0x110,
+ .csid_csi2_rx_de_scramble_cfg0_addr = 0x114,
+ .csid_csi2_rx_de_scramble_cfg1_addr = 0x118,
+ .csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr = 0x120,
+ .csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr = 0x124,
+ .csid_csi2_rx_captured_short_pkt_0_addr = 0x128,
+ .csid_csi2_rx_captured_short_pkt_1_addr = 0x12c,
+ .csid_csi2_rx_captured_long_pkt_0_addr = 0x130,
+ .csid_csi2_rx_captured_long_pkt_1_addr = 0x134,
+ .csid_csi2_rx_captured_long_pkt_ftr_addr = 0x138,
+ .csid_csi2_rx_captured_cphy_pkt_hdr_addr = 0x13c,
+ .csid_csi2_rx_lane0_misr_addr = 0x150,
+ .csid_csi2_rx_lane1_misr_addr = 0x154,
+ .csid_csi2_rx_lane2_misr_addr = 0x158,
+ .csid_csi2_rx_lane3_misr_addr = 0x15c,
+ .csid_csi2_rx_total_pkts_rcvd_addr = 0x160,
+ .csid_csi2_rx_stats_ecc_addr = 0x164,
+ .csid_csi2_rx_total_crc_err_addr = 0x168,
+
+ .csi2_rst_srb_all = 0x3FFF,
+ .csi2_rst_done_shift_val = 27,
+ .csi2_irq_mask_all = 0xFFFFFFF,
+ .csi2_misr_enable_shift_val = 6,
+ .csi2_vc_mode_shift_val = 2,
+ .csi2_capture_long_pkt_en_shift = 0,
+ .csi2_capture_short_pkt_en_shift = 1,
+ .csi2_capture_cphy_pkt_en_shift = 2,
+ .csi2_capture_long_pkt_dt_shift = 4,
+ .csi2_capture_long_pkt_vc_shift = 10,
+ .csi2_capture_short_pkt_vc_shift = 15,
+ .csi2_capture_cphy_pkt_dt_shift = 20,
+ .csi2_capture_cphy_pkt_vc_shift = 26,
+};
+
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+ cam_ife_csid_lite_170_tpg_reg_offset = {
+
+ /*CSID TPG control */
+ .csid_tpg_ctrl_addr = 0x600,
+ .csid_tpg_vc_cfg0_addr = 0x604,
+ .csid_tpg_vc_cfg1_addr = 0x608,
+ .csid_tpg_lfsr_seed_addr = 0x60c,
+ .csid_tpg_dt_n_cfg_0_addr = 0x610,
+ .csid_tpg_dt_n_cfg_1_addr = 0x614,
+ .csid_tpg_dt_n_cfg_2_addr = 0x618,
+ .csid_tpg_color_bars_cfg_addr = 0x640,
+ .csid_tpg_color_box_cfg_addr = 0x644,
+ .csid_tpg_common_gen_cfg_addr = 0x648,
+ .csid_tpg_cgen_n_cfg_addr = 0x650,
+ .csid_tpg_cgen_n_x0_addr = 0x654,
+ .csid_tpg_cgen_n_x1_addr = 0x658,
+ .csid_tpg_cgen_n_x2_addr = 0x65c,
+ .csid_tpg_cgen_n_xy_addr = 0x660,
+ .csid_tpg_cgen_n_y1_addr = 0x664,
+ .csid_tpg_cgen_n_y2_addr = 0x668,
+
+ /*configurations */
+ .tpg_dtn_cfg_offset = 0xc,
+ .tpg_cgen_cfg_offset = 0x20,
+ .tpg_cpas_ife_reg_offset = 0x28,
+};
+
+
+static struct cam_ife_csid_common_reg_offset
+ cam_csid_lite_170_cmn_reg_offset = {
+
+ .csid_hw_version_addr = 0x0,
+ .csid_cfg0_addr = 0x4,
+ .csid_ctrl_addr = 0x8,
+ .csid_reset_addr = 0xc,
+ .csid_rst_strobes_addr = 0x10,
+
+ .csid_test_bus_ctrl_addr = 0x14,
+ .csid_top_irq_status_addr = 0x70,
+ .csid_top_irq_mask_addr = 0x74,
+ .csid_top_irq_clear_addr = 0x78,
+ .csid_top_irq_set_addr = 0x7c,
+ .csid_irq_cmd_addr = 0x80,
+
+ /*configurations */
+ .major_version = 1,
+ .minor_version = 7,
+ .version_incr = 0,
+ .num_rdis = 4,
+ .num_pix = 0,
+ .csid_rst_stb = 0x1e,
+ .csid_rst_stb_sw_all = 0x1f,
+ .path_rst_stb_all = 0x7f,
+ .path_rst_done_shift_val = 1,
+ .path_en_shift_val = 31,
+ .dt_id_shift_val = 27,
+ .vc_shift_val = 22,
+ .dt_shift_val = 16,
+ .fmt_shift_val = 12,
+ .plain_fmt_shit_val = 10,
+ .crop_v_en_shift_val = 6,
+ .crop_h_en_shift_val = 5,
+ .crop_shift = 16,
+ .ipp_irq_mask_all = 0x7FFF,
+ .rdi_irq_mask_all = 0x7FFF,
+};
+
+static struct cam_ife_csid_reg_offset cam_ife_csid_lite_170_reg_offset = {
+ .cmn_reg = &cam_csid_lite_170_cmn_reg_offset,
+ .csi2_reg = &cam_ife_csid_lite_170_csi2_reg_offset,
+ .ipp_reg = NULL,
+ .rdi_reg = {
+ &cam_ife_csid_lite_170_rdi_0_reg_offset,
+ &cam_ife_csid_lite_170_rdi_1_reg_offset,
+ &cam_ife_csid_lite_170_rdi_2_reg_offset,
+ &cam_ife_csid_lite_170_rdi_3_reg_offset,
+ },
+ .tpg_reg = &cam_ife_csid_lite_170_tpg_reg_offset,
+};
+
+#endif /*_CAM_IFE_CSID_LITE170_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c
new file mode 100644
index 000000000000..b05a4b533c23
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c
@@ -0,0 +1,57 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_ife_csid_lite17x.h"
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+
+#define CAM_CSID_LITE_DRV_NAME "csid_lite"
+
+static struct cam_ife_csid_hw_info cam_ife_csid_lite_hw_info = {
+ .csid_reg = &cam_ife_csid_lite_17x_reg_offset,
+};
+
+static const struct of_device_id cam_ife_csid_lite_dt_match[] = {
+ {
+ .compatible = "qcom,csid-lite170",
+ .data = &cam_ife_csid_lite_hw_info,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cam_ife_csid_lite_dt_match);
+
+static struct platform_driver cam_ife_csid_lite_driver = {
+ .probe = cam_ife_csid_probe,
+ .remove = cam_ife_csid_remove,
+ .driver = {
+ .name = CAM_CSID_LITE_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cam_ife_csid_lite_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_ife_csid_lite_init_module(void)
+{
+ return platform_driver_register(&cam_ife_csid_lite_driver);
+}
+
+static void __exit cam_ife_csid_lite_exit_module(void)
+{
+ platform_driver_unregister(&cam_ife_csid_lite_driver);
+}
+
+module_init(cam_ife_csid_lite_init_module);
+module_exit(cam_ife_csid_lite_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID_LITE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.h
new file mode 100644
index 000000000000..d4e05115455a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.h
@@ -0,0 +1,320 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_LITE17X_H_
+#define _CAM_IFE_CSID_LITE17X_H_
+#include "cam_ife_csid_core.h"
+
+static const struct cam_ife_csid_rdi_reg_offset
+ cam_ife_csid_lite_17x_rdi_0_reg_offset = {
+
+ .csid_rdi_irq_status_addr = 0x30,
+ .csid_rdi_irq_mask_addr = 0x34,
+ .csid_rdi_irq_clear_addr = 0x38,
+ .csid_rdi_irq_set_addr = 0x3c,
+ .csid_rdi_cfg0_addr = 0x200,
+ .csid_rdi_cfg1_addr = 0x204,
+ .csid_rdi_ctrl_addr = 0x208,
+ .csid_rdi_frm_drop_pattern_addr = 0x20c,
+ .csid_rdi_frm_drop_period_addr = 0x210,
+ .csid_rdi_irq_subsample_pattern_addr = 0x214,
+ .csid_rdi_irq_subsample_period_addr = 0x218,
+ .csid_rdi_rpp_hcrop_addr = 0x21c,
+ .csid_rdi_rpp_vcrop_addr = 0x220,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x224,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x228,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x22c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x230,
+ .csid_rdi_rst_strobes_addr = 0x240,
+ .csid_rdi_status_addr = 0x250,
+ .csid_rdi_misr_val0_addr = 0x254,
+ .csid_rdi_misr_val1_addr = 0x258,
+ .csid_rdi_misr_val2_addr = 0x25c,
+ .csid_rdi_misr_val3_addr = 0x260,
+ .csid_rdi_format_measure_cfg0_addr = 0x270,
+ .csid_rdi_format_measure_cfg1_addr = 0x274,
+ .csid_rdi_format_measure0_addr = 0x278,
+ .csid_rdi_format_measure1_addr = 0x27c,
+ .csid_rdi_format_measure2_addr = 0x280,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x290,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x294,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x298,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x29c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x2a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x2a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x2a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x2ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x2e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x2e4,
+};
+
+static const struct cam_ife_csid_rdi_reg_offset
+ cam_ife_csid_lite_17x_rdi_1_reg_offset = {
+
+ .csid_rdi_irq_status_addr = 0x40,
+ .csid_rdi_irq_mask_addr = 0x44,
+ .csid_rdi_irq_clear_addr = 0x48,
+ .csid_rdi_irq_set_addr = 0x4c,
+ .csid_rdi_cfg0_addr = 0x300,
+ .csid_rdi_cfg1_addr = 0x304,
+ .csid_rdi_ctrl_addr = 0x308,
+ .csid_rdi_frm_drop_pattern_addr = 0x30c,
+ .csid_rdi_frm_drop_period_addr = 0x310,
+ .csid_rdi_irq_subsample_pattern_addr = 0x314,
+ .csid_rdi_irq_subsample_period_addr = 0x318,
+ .csid_rdi_rpp_hcrop_addr = 0x31c,
+ .csid_rdi_rpp_vcrop_addr = 0x320,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x324,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x328,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x32c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x330,
+ .csid_rdi_rst_strobes_addr = 0x340,
+ .csid_rdi_status_addr = 0x350,
+ .csid_rdi_misr_val0_addr = 0x354,
+ .csid_rdi_misr_val1_addr = 0x358,
+ .csid_rdi_misr_val2_addr = 0x35c,
+ .csid_rdi_misr_val3_addr = 0x360,
+ .csid_rdi_format_measure_cfg0_addr = 0x370,
+ .csid_rdi_format_measure_cfg1_addr = 0x374,
+ .csid_rdi_format_measure0_addr = 0x378,
+ .csid_rdi_format_measure1_addr = 0x37c,
+ .csid_rdi_format_measure2_addr = 0x380,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x390,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x394,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x398,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x39c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x3a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x3a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x3a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x3ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x3e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x3e4,
+};
+
+static const struct cam_ife_csid_rdi_reg_offset
+ cam_ife_csid_lite_17x_rdi_2_reg_offset = {
+
+ .csid_rdi_irq_status_addr = 0x50,
+ .csid_rdi_irq_mask_addr = 0x54,
+ .csid_rdi_irq_clear_addr = 0x58,
+ .csid_rdi_irq_set_addr = 0x5c,
+ .csid_rdi_cfg0_addr = 0x400,
+ .csid_rdi_cfg1_addr = 0x404,
+ .csid_rdi_ctrl_addr = 0x408,
+ .csid_rdi_frm_drop_pattern_addr = 0x40c,
+ .csid_rdi_frm_drop_period_addr = 0x410,
+ .csid_rdi_irq_subsample_pattern_addr = 0x414,
+ .csid_rdi_irq_subsample_period_addr = 0x418,
+ .csid_rdi_rpp_hcrop_addr = 0x41c,
+ .csid_rdi_rpp_vcrop_addr = 0x420,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x424,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x428,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x42c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x430,
+ .csid_rdi_yuv_chroma_conversion_addr = 0x434,
+ .csid_rdi_rst_strobes_addr = 0x440,
+ .csid_rdi_status_addr = 0x450,
+ .csid_rdi_misr_val0_addr = 0x454,
+ .csid_rdi_misr_val1_addr = 0x458,
+ .csid_rdi_misr_val2_addr = 0x45c,
+ .csid_rdi_misr_val3_addr = 0x460,
+ .csid_rdi_format_measure_cfg0_addr = 0x470,
+ .csid_rdi_format_measure_cfg1_addr = 0x474,
+ .csid_rdi_format_measure0_addr = 0x478,
+ .csid_rdi_format_measure1_addr = 0x47c,
+ .csid_rdi_format_measure2_addr = 0x480,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x490,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x494,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x498,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x49c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x4a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x4a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x4a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x4ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x4e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x4e4,
+};
+
+static const struct cam_ife_csid_rdi_reg_offset
+ cam_ife_csid_lite_17x_rdi_3_reg_offset = {
+
+ .csid_rdi_irq_status_addr = 0x60,
+ .csid_rdi_irq_mask_addr = 0x64,
+ .csid_rdi_irq_clear_addr = 0x68,
+ .csid_rdi_irq_set_addr = 0x6c,
+ .csid_rdi_cfg0_addr = 0x500,
+ .csid_rdi_cfg1_addr = 0x504,
+ .csid_rdi_ctrl_addr = 0x508,
+ .csid_rdi_frm_drop_pattern_addr = 0x50c,
+ .csid_rdi_frm_drop_period_addr = 0x510,
+ .csid_rdi_irq_subsample_pattern_addr = 0x514,
+ .csid_rdi_irq_subsample_period_addr = 0x518,
+ .csid_rdi_rpp_hcrop_addr = 0x51c,
+ .csid_rdi_rpp_vcrop_addr = 0x520,
+ .csid_rdi_rpp_pix_drop_pattern_addr = 0x524,
+ .csid_rdi_rpp_pix_drop_period_addr = 0x528,
+ .csid_rdi_rpp_line_drop_pattern_addr = 0x52c,
+ .csid_rdi_rpp_line_drop_period_addr = 0x530,
+ .csid_rdi_yuv_chroma_conversion_addr = 0x534,
+ .csid_rdi_rst_strobes_addr = 0x540,
+ .csid_rdi_status_addr = 0x550,
+ .csid_rdi_misr_val0_addr = 0x554,
+ .csid_rdi_misr_val1_addr = 0x558,
+ .csid_rdi_misr_val2_addr = 0x55c,
+ .csid_rdi_misr_val3_addr = 0x560,
+ .csid_rdi_format_measure_cfg0_addr = 0x570,
+ .csid_rdi_format_measure_cfg1_addr = 0x574,
+ .csid_rdi_format_measure0_addr = 0x578,
+ .csid_rdi_format_measure1_addr = 0x57c,
+ .csid_rdi_format_measure2_addr = 0x580,
+ .csid_rdi_timestamp_curr0_sof_addr = 0x590,
+ .csid_rdi_timestamp_curr1_sof_addr = 0x594,
+ .csid_rdi_timestamp_prev0_sof_addr = 0x598,
+ .csid_rdi_timestamp_prev1_sof_addr = 0x59c,
+ .csid_rdi_timestamp_curr0_eof_addr = 0x5a0,
+ .csid_rdi_timestamp_curr1_eof_addr = 0x5a4,
+ .csid_rdi_timestamp_prev0_eof_addr = 0x5a8,
+ .csid_rdi_timestamp_prev1_eof_addr = 0x5ac,
+ .csid_rdi_byte_cntr_ping_addr = 0x5e0,
+ .csid_rdi_byte_cntr_pong_addr = 0x5e4,
+};
+
+static const struct cam_ife_csid_csi2_rx_reg_offset
+ cam_ife_csid_lite_17x_csi2_reg_offset = {
+
+ .csid_csi2_rx_irq_status_addr = 0x20,
+ .csid_csi2_rx_irq_mask_addr = 0x24,
+ .csid_csi2_rx_irq_clear_addr = 0x28,
+ .csid_csi2_rx_irq_set_addr = 0x2c,
+
+ /*CSI2 rx control */
+ .csid_csi2_rx_cfg0_addr = 0x100,
+ .csid_csi2_rx_cfg1_addr = 0x104,
+ .csid_csi2_rx_capture_ctrl_addr = 0x108,
+ .csid_csi2_rx_rst_strobes_addr = 0x110,
+ .csid_csi2_rx_de_scramble_cfg0_addr = 0x114,
+ .csid_csi2_rx_de_scramble_cfg1_addr = 0x118,
+ .csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr = 0x120,
+ .csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr = 0x124,
+ .csid_csi2_rx_captured_short_pkt_0_addr = 0x128,
+ .csid_csi2_rx_captured_short_pkt_1_addr = 0x12c,
+ .csid_csi2_rx_captured_long_pkt_0_addr = 0x130,
+ .csid_csi2_rx_captured_long_pkt_1_addr = 0x134,
+ .csid_csi2_rx_captured_long_pkt_ftr_addr = 0x138,
+ .csid_csi2_rx_captured_cphy_pkt_hdr_addr = 0x13c,
+ .csid_csi2_rx_lane0_misr_addr = 0x150,
+ .csid_csi2_rx_lane1_misr_addr = 0x154,
+ .csid_csi2_rx_lane2_misr_addr = 0x158,
+ .csid_csi2_rx_lane3_misr_addr = 0x15c,
+ .csid_csi2_rx_total_pkts_rcvd_addr = 0x160,
+ .csid_csi2_rx_stats_ecc_addr = 0x164,
+ .csid_csi2_rx_total_crc_err_addr = 0x168,
+
+ .csi2_rst_srb_all = 0x3FFF,
+ .csi2_rst_done_shift_val = 27,
+ .csi2_irq_mask_all = 0xFFFFFFF,
+ .csi2_misr_enable_shift_val = 6,
+ .csi2_vc_mode_shift_val = 2,
+ .csi2_capture_long_pkt_en_shift = 0,
+ .csi2_capture_short_pkt_en_shift = 1,
+ .csi2_capture_cphy_pkt_en_shift = 2,
+ .csi2_capture_long_pkt_dt_shift = 4,
+ .csi2_capture_long_pkt_vc_shift = 10,
+ .csi2_capture_short_pkt_vc_shift = 15,
+ .csi2_capture_cphy_pkt_dt_shift = 20,
+ .csi2_capture_cphy_pkt_vc_shift = 26,
+};
+
+
+static const struct cam_ife_csid_csi2_tpg_reg_offset
+ cam_ife_csid_lite_17x_tpg_reg_offset = {
+
+ /*CSID TPG control */
+ .csid_tpg_ctrl_addr = 0x600,
+ .csid_tpg_vc_cfg0_addr = 0x604,
+ .csid_tpg_vc_cfg1_addr = 0x608,
+ .csid_tpg_lfsr_seed_addr = 0x60c,
+ .csid_tpg_dt_n_cfg_0_addr = 0x610,
+ .csid_tpg_dt_n_cfg_1_addr = 0x614,
+ .csid_tpg_dt_n_cfg_2_addr = 0x618,
+ .csid_tpg_color_bars_cfg_addr = 0x640,
+ .csid_tpg_color_box_cfg_addr = 0x644,
+ .csid_tpg_common_gen_cfg_addr = 0x648,
+ .csid_tpg_cgen_n_cfg_addr = 0x650,
+ .csid_tpg_cgen_n_x0_addr = 0x654,
+ .csid_tpg_cgen_n_x1_addr = 0x658,
+ .csid_tpg_cgen_n_x2_addr = 0x65c,
+ .csid_tpg_cgen_n_xy_addr = 0x660,
+ .csid_tpg_cgen_n_y1_addr = 0x664,
+ .csid_tpg_cgen_n_y2_addr = 0x668,
+
+ /*configurations */
+ .tpg_dtn_cfg_offset = 0xc,
+ .tpg_cgen_cfg_offset = 0x20,
+ .tpg_cpas_ife_reg_offset = 0x28,
+};
+
+
+static const struct cam_ife_csid_common_reg_offset
+ cam_csid_lite_17x_cmn_reg_offset = {
+
+ .csid_hw_version_addr = 0x0,
+ .csid_cfg0_addr = 0x4,
+ .csid_ctrl_addr = 0x8,
+ .csid_reset_addr = 0xc,
+ .csid_rst_strobes_addr = 0x10,
+
+ .csid_test_bus_ctrl_addr = 0x14,
+ .csid_top_irq_status_addr = 0x70,
+ .csid_top_irq_mask_addr = 0x74,
+ .csid_top_irq_clear_addr = 0x78,
+ .csid_top_irq_set_addr = 0x7c,
+ .csid_irq_cmd_addr = 0x80,
+
+ /*configurations */
+ .major_version = 1,
+ .minor_version = 7,
+ .version_incr = 0,
+ .num_rdis = 4,
+ .num_pix = 0,
+ .csid_rst_stb = 0x1e,
+ .csid_rst_stb_sw_all = 0x1f,
+ .path_rst_stb_all = 0x7f,
+ .path_rst_done_shift_val = 1,
+ .path_en_shift_val = 31,
+ .dt_id_shift_val = 27,
+ .vc_shift_val = 22,
+ .dt_shift_val = 16,
+ .fmt_shift_val = 12,
+ .plain_fmt_shit_val = 10,
+ .crop_v_en_shift_val = 6,
+ .crop_h_en_shift_val = 5,
+ .crop_shift = 16,
+ .ipp_irq_mask_all = 0x7FFF,
+ .rdi_irq_mask_all = 0x7FFF,
+ .ppp_irq_mask_all = 0xFFFF,
+};
+
+static const struct cam_ife_csid_reg_offset cam_ife_csid_lite_17x_reg_offset = {
+ .cmn_reg = &cam_csid_lite_17x_cmn_reg_offset,
+ .csi2_reg = &cam_ife_csid_lite_17x_csi2_reg_offset,
+ .ipp_reg = NULL,
+ .rdi_reg = {
+ &cam_ife_csid_lite_17x_rdi_0_reg_offset,
+ &cam_ife_csid_lite_17x_rdi_1_reg_offset,
+ &cam_ife_csid_lite_17x_rdi_2_reg_offset,
+ &cam_ife_csid_lite_17x_rdi_3_reg_offset,
+ },
+ .tpg_reg = &cam_ife_csid_lite_17x_tpg_reg_offset,
+};
+
+#endif /*_CAM_IFE_CSID_LITE17X_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
new file mode 100644
index 000000000000..890dad3b66b2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -0,0 +1,258 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include "cam_ife_csid_soc.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static int cam_ife_csid_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+ struct device_node *of_node = NULL;
+ struct csid_device_soc_info *csid_soc_info = NULL;
+ int rc = 0;
+
+ of_node = soc_info->pdev->dev.of_node;
+ csid_soc_info = (struct csid_device_soc_info *)soc_info->soc_private;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+static int cam_ife_csid_request_platform_resource(
+ struct cam_hw_soc_info *soc_info,
+ irq_handler_t csid_irq_handler,
+ void *irq_data)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_request_platform_resource(soc_info, csid_irq_handler,
+ irq_data);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t csid_irq_handler, void *irq_data)
+{
+ int rc = 0;
+ struct cam_cpas_register_params cpas_register_param;
+ struct cam_csid_soc_private *soc_private;
+
+ soc_private = kzalloc(sizeof(struct cam_csid_soc_private), GFP_KERNEL);
+ if (!soc_private)
+ return -ENOMEM;
+
+ soc_info->soc_private = soc_private;
+
+ rc = cam_ife_csid_get_dt_properties(soc_info);
+ if (rc < 0)
+ goto free_soc_private;
+
+ /* Need to see if we want post process the clock list */
+
+ rc = cam_ife_csid_request_platform_resource(soc_info, csid_irq_handler,
+ irq_data);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP,
+ "Error Request platform resources failed rc=%d", rc);
+ goto free_soc_private;
+ }
+
+ memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+ strlcpy(cpas_register_param.identifier, "csid",
+ CAM_HW_IDENTIFIER_LENGTH);
+ cpas_register_param.cell_index = soc_info->index;
+ cpas_register_param.dev = soc_info->dev;
+ rc = cam_cpas_register_client(&cpas_register_param);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
+ goto release_soc;
+ } else {
+ soc_private->cpas_handle = cpas_register_param.client_handle;
+ }
+
+ return rc;
+
+release_soc:
+ cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+ kfree(soc_private);
+
+ return rc;
+}
+
+int cam_ife_csid_deinit_soc_resources(
+ struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+
+ soc_private = soc_info->soc_private;
+ if (!soc_private) {
+ CAM_ERR(CAM_ISP, "Error soc_private NULL");
+ return -ENODEV;
+ }
+
+ rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+ rc = cam_soc_util_release_platform_resource(soc_info);
+
+ return rc;
+}
+
+int cam_ife_csid_enable_soc_resources(
+ struct cam_hw_soc_info *soc_info, uint32_t clk_lvl)
+{
+ int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+
+ soc_private = soc_info->soc_private;
+
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+ CAM_DBG(CAM_ISP, "csid vote compressed_bw:%lld uncompressed_bw:%lld",
+ axi_vote.compressed_bw, axi_vote.uncompressed_bw);
+
+ rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error CPAS start failed");
+ rc = -EFAULT;
+ goto end;
+ }
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ clk_lvl, true);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "enable platform failed");
+ goto stop_cpas;
+ }
+
+ return rc;
+
+stop_cpas:
+ cam_cpas_stop(soc_private->cpas_handle);
+end:
+ return rc;
+}
+
+int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error Invalid params");
+ return -EINVAL;
+ }
+ soc_private = soc_info->soc_private;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Disable platform failed");
+
+ rc = cam_cpas_stop(soc_private->cpas_handle);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error CPAS stop failed rc=%d", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+int cam_ife_csid_enable_ife_force_clock_on(struct cam_hw_soc_info *soc_info,
+ uint32_t cpas_ife_base_offset)
+{
+ int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+ uint32_t cpass_ife_force_clk_offset;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error Invalid params");
+ return -EINVAL;
+ }
+
+ soc_private = soc_info->soc_private;
+ cpass_ife_force_clk_offset =
+ cpas_ife_base_offset + (0x4 * soc_info->index);
+ rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REG_CPASTOP,
+ cpass_ife_force_clk_offset, 1, 1);
+
+ if (rc)
+ CAM_ERR(CAM_ISP, "CPASS set IFE:%d Force clock On failed",
+ soc_info->index);
+ else
+ CAM_DBG(CAM_ISP, "CPASS set IFE:%d Force clock On",
+ soc_info->index);
+
+ return rc;
+}
+
+int cam_ife_csid_disable_ife_force_clock_on(struct cam_hw_soc_info *soc_info,
+ uint32_t cpas_ife_base_offset)
+{
+ int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+ uint32_t cpass_ife_force_clk_offset;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error Invalid params");
+ return -EINVAL;
+ }
+
+ soc_private = soc_info->soc_private;
+ cpass_ife_force_clk_offset =
+ cpas_ife_base_offset + (0x4 * soc_info->index);
+ rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REG_CPASTOP,
+ cpass_ife_force_clk_offset, 1, 0);
+
+ if (rc)
+ CAM_ERR(CAM_ISP, "CPASS set IFE:%d Force clock Off failed",
+ soc_info->index);
+ else
+ CAM_DBG(CAM_ISP, "CPASS set IFE:%d Force clock off",
+ soc_info->index);
+
+ return rc;
+}
+
+uint32_t cam_ife_csid_get_vote_level(struct cam_hw_soc_info *soc_info,
+ uint64_t clock_rate)
+{
+ int i = 0;
+
+ if (!clock_rate)
+ return CAM_SVS_VOTE;
+
+ for (i = 0; i < CAM_MAX_VOTE; i++) {
+ if (soc_info->clk_rate[i][soc_info->num_clk - 1] >=
+ clock_rate) {
+ CAM_DBG(CAM_ISP,
+ "Clock rate %lld, selected clock level %d",
+ clock_rate, i);
+ return i;
+ }
+ }
+
+ return CAM_TURBO_VOTE;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
new file mode 100644
index 000000000000..474f0852dbec
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
@@ -0,0 +1,126 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_SOC_H_
+#define _CAM_IFE_CSID_SOC_H_
+
+#include "cam_isp_hw.h"
+
+/*
+ * struct cam_csid_soc_private:
+ *
+ * @Brief: Private SOC data specific to CSID HW Driver
+ *
+ * @cpas_handle: Handle returned on registering with CPAS driver.
+ * This handle is used for all further interface
+ * with CPAS.
+ */
+struct cam_csid_soc_private {
+ uint32_t cpas_handle;
+};
+
+/**
+ * struct csid_device_soc_info - CSID SOC info object
+ *
+ * @csi_vdd_voltage: csi vdd voltage value
+ *
+ */
+struct csid_device_soc_info {
+ int csi_vdd_voltage;
+};
+
+/**
+ * cam_ife_csid_init_soc_resources()
+ *
+ * @brief: csid initialization function for the soc info
+ *
+ * @soc_info: soc info structure pointer
+ * @csid_irq_handler: irq handler function to be registered
+ * @irq_data: irq data for the callback function
+ *
+ */
+int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t csid_irq_handler, void *irq_data);
+
+
+/**
+ * cam_ife_csid_deinit_soc_resources()
+ *
+ * @brief:              csid de-initialization function for the soc info
+ *
+ * @soc_info: soc info structure pointer
+ *
+ */
+int cam_ife_csid_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_ife_csid_enable_soc_resources()
+ *
+ * @brief: csid soc resource enable function
+ *
+ * @soc_info: soc info structure pointer
+ * @clk_lvl: vote level to start with
+ *
+ */
+int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info,
+ uint32_t clk_lvl);
+
+/**
+ * cam_ife_csid_disable_soc_resources()
+ *
+ * @brief: csid soc resource disable function
+ *
+ * @soc_info: soc info structure pointer
+ *
+ */
+int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_ife_csid_enable_ife_force_clock_on()
+ *
+ * @brief: if csid testgen used for dual isp case, before
+ * starting csid test gen, enable ife force clock on
+ * through cpas
+ *
+ * @soc_info: soc info structure pointer
+ * @cpas_ife_base_offset: cpas ife force clock base reg offset value
+ *
+ */
+int cam_ife_csid_enable_ife_force_clock_on(struct cam_hw_soc_info *soc_info,
+ uint32_t cpas_ife_base_offset);
+
+/**
+ * cam_ife_csid_disable_ife_force_clock_on()
+ *
+ * @brief: disable the IFE force clock on after dual ISP
+ * CSID test gen stop
+ *
+ * @soc_info: soc info structure pointer
+ * @cpas_ife_base_offset: cpas ife force clock base reg offset value
+ *
+ */
+int cam_ife_csid_disable_ife_force_clock_on(struct cam_hw_soc_info *soc_info,
+ uint32_t cpas_ife_base_offset);
+
+/**
+ * cam_ife_csid_get_vote_level()
+ *
+ * @brief: get the vote level from clock rate
+ *
+ * @soc_info: soc info structure pointer
+ * @clock_rate:     clock rate
+ *
+ */
+uint32_t cam_ife_csid_get_vote_level(struct cam_hw_soc_info *soc_info,
+ uint64_t clock_rate);
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
new file mode 100644
index 000000000000..58818fbecf67
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -0,0 +1,184 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSID_HW_INTF_H_
+#define _CAM_CSID_HW_INTF_H_
+
+#include "cam_isp_hw.h"
+#include "cam_hw_intf.h"
+
+/* MAX IFE CSID instance */
+#define CAM_IFE_CSID_HW_NUM_MAX 4
+
+/**
+ * enum cam_ife_pix_path_res_id - Specify the csid path
+ */
+enum cam_ife_pix_path_res_id {
+ CAM_IFE_PIX_PATH_RES_RDI_0,
+ CAM_IFE_PIX_PATH_RES_RDI_1,
+ CAM_IFE_PIX_PATH_RES_RDI_2,
+ CAM_IFE_PIX_PATH_RES_RDI_3,
+ CAM_IFE_PIX_PATH_RES_IPP,
+ CAM_IFE_PIX_PATH_RES_PPP,
+ CAM_IFE_PIX_PATH_RES_MAX,
+};
+
+/**
+ * enum cam_ife_cid_res_id - Specify the csid cid
+ */
+enum cam_ife_cid_res_id {
+ CAM_IFE_CSID_CID_0,
+ CAM_IFE_CSID_CID_1,
+ CAM_IFE_CSID_CID_2,
+ CAM_IFE_CSID_CID_3,
+ CAM_IFE_CSID_CID_MAX,
+};
+
+/**
+ * struct cam_ife_csid_hw_caps- get the CSID hw capability
+ * @num_rdis: number of rdis supported by CSID HW device
+ * @num_pix: number of pxl paths supported by CSID HW device
+ * @num_ppp: number of ppp paths supported by CSID HW device
+ * @major_version : major version
+ * @minor_version: minor version
+ * @version_incr: version increment
+ *
+ */
+struct cam_ife_csid_hw_caps {
+ uint32_t num_rdis;
+ uint32_t num_pix;
+ uint32_t num_ppp;
+ uint32_t major_version;
+ uint32_t minor_version;
+ uint32_t version_incr;
+};
+
+/**
+ * struct cam_csid_hw_reserve_resource- hw reserve
+ * @res_type :  Resource type CID or PATH
+ * if type is CID, then res_id is not required,
+ * if type is path then res id need to be filled
+ * @res_id : Resource id to be reserved
+ * @in_port : Input port resource info
+ * @out_port: Output port resource info, used for RDI path only
+ * @sync_mode: Sync mode
+ * Sync mode could be master, slave or none
+ * @master_idx: Master device index to be configured in the slave path
+ * for master path, this value is not required.
+ * only slave need to configure the master index value
+ * @cid: cid (DT_ID) value for path, this is applicable for CSID path
+ * reserve
+ * @node_res : Reserved resource structure pointer
+ *
+ */
+struct cam_csid_hw_reserve_resource_args {
+ enum cam_isp_resource_type res_type;
+ uint32_t res_id;
+ struct cam_isp_in_port_info *in_port;
+ struct cam_isp_out_port_info *out_port;
+ enum cam_isp_hw_sync_mode sync_mode;
+ uint32_t master_idx;
+ uint32_t cid;
+ struct cam_isp_resource_node *node_res;
+};
+
+/**
+ * enum cam_ife_csid_halt_cmd - Specify the halt command type
+ */
+enum cam_ife_csid_halt_cmd {
+ CAM_CSID_HALT_AT_FRAME_BOUNDARY,
+ CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+ CAM_CSID_HALT_IMMEDIATELY,
+ CAM_CSID_HALT_MAX,
+};
+
+/**
+ * struct cam_csid_hw_stop- stop all resources
+ * @stop_cmd : Applicable only for PATH resources
+ * if stop command set to Halt immediately, driver will stop
+ * path immediately, manager needs to reset the path after halt immediately
+ * if stop command set to halt at frame boundary, driver will set
+ * halt at frame boundary and wait for frame boundary
+ * @node_res : resource pointer array (i.e. CID or CSID path)
+ * @num_res : number of resources to be stopped
+ *
+ */
+struct cam_csid_hw_stop_args {
+ enum cam_ife_csid_halt_cmd stop_cmd;
+ struct cam_isp_resource_node **node_res;
+ uint32_t num_res;
+};
+
+/**
+ * enum cam_ife_csid_reset_type - Specify the reset type
+ */
+enum cam_ife_csid_reset_type {
+ CAM_IFE_CSID_RESET_GLOBAL,
+ CAM_IFE_CSID_RESET_PATH,
+ CAM_IFE_CSID_RESET_MAX,
+};
+
+/**
+ * struct cam_ife_csid_reset_cfg- csid reset configuration
+ * @reset_type : Global reset or path reset
+ * @res_node : resource need to be reset
+ *
+ */
+struct cam_csid_reset_cfg_args {
+ enum cam_ife_csid_reset_type reset_type;
+ struct cam_isp_resource_node *node_res;
+};
+
+/**
+ * struct cam_csid_get_time_stamp_args- time stamp capture arguments
+ * @res_node : resource to get the time stamp
+ * @time_stamp_val : captured time stamp
+ * @boot_timestamp : boot time stamp
+ */
+struct cam_csid_get_time_stamp_args {
+ struct cam_isp_resource_node *node_res;
+ uint64_t time_stamp_val;
+ uint64_t boot_timestamp;
+};
+
+/**
+ * enum cam_ife_csid_cmd_type - Specify the csid command
+ */
+enum cam_ife_csid_cmd_type {
+ CAM_IFE_CSID_CMD_GET_TIME_STAMP,
+ CAM_IFE_CSID_SET_CSID_DEBUG,
+ CAM_IFE_CSID_SOF_IRQ_DEBUG,
+ CAM_IFE_CSID_CMD_MAX,
+};
+
+/**
+ * cam_ife_csid_hw_init()
+ *
+ * @brief: Initialize function for the CSID hardware
+ *
+ * @ife_csid_hw: CSID hardware instance returned
+ * @hw_idx: CSID hardware instance id
+ */
+int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw,
+ uint32_t hw_idx);
+
+/*
+ * struct cam_ife_csid_clock_update_args:
+ *
+ * @clk_rate: Clock rate requested
+ */
+struct cam_ife_csid_clock_update_args {
+ uint64_t clk_rate;
+};
+
+
+#endif /* _CAM_CSID_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
new file mode 100644
index 000000000000..940d338a9572
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -0,0 +1,227 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_HW_H_
+#define _CAM_ISP_HW_H_
+
+#include <linux/completion.h>
+#include "cam_hw.h"
+#include <uapi/media/cam_isp.h>
+#include "cam_soc_util.h"
+#include "cam_irq_controller.h"
+#include <uapi/media/cam_isp.h>
+
+/*
+ * struct cam_isp_timestamp:
+ *
+ * @mono_time: Monotonic boot time
+ * @vt_time: AV Timer time
+ * @ticks: Qtimer ticks
+ */
+struct cam_isp_timestamp {
+ struct timeval mono_time;
+ struct timeval vt_time;
+ uint64_t ticks;
+};
+
+/*
+ * cam_isp_hw_get_timestamp()
+ *
+ * @Brief: Get timestamp values
+ *
+ * @time_stamp: Structure that holds different time values
+ *
+ * @Return: Void
+ */
+void cam_isp_hw_get_timestamp(struct cam_isp_timestamp *time_stamp);
+
+enum cam_isp_hw_type {
+ CAM_ISP_HW_TYPE_CSID = 0,
+ CAM_ISP_HW_TYPE_ISPIF = 1,
+ CAM_ISP_HW_TYPE_VFE = 2,
+ CAM_ISP_HW_TYPE_IFE_CSID = 3,
+ CAM_ISP_HW_TYPE_MAX = 4,
+};
+
+enum cam_isp_hw_split_id {
+ CAM_ISP_HW_SPLIT_LEFT = 0,
+ CAM_ISP_HW_SPLIT_RIGHT,
+ CAM_ISP_HW_SPLIT_MAX,
+};
+
+enum cam_isp_hw_sync_mode {
+ CAM_ISP_HW_SYNC_NONE,
+ CAM_ISP_HW_SYNC_MASTER,
+ CAM_ISP_HW_SYNC_SLAVE,
+ CAM_ISP_HW_SYNC_MAX,
+};
+
+enum cam_isp_resource_state {
+ CAM_ISP_RESOURCE_STATE_UNAVAILABLE = 0,
+ CAM_ISP_RESOURCE_STATE_AVAILABLE = 1,
+ CAM_ISP_RESOURCE_STATE_RESERVED = 2,
+ CAM_ISP_RESOURCE_STATE_INIT_HW = 3,
+ CAM_ISP_RESOURCE_STATE_STREAMING = 4,
+};
+
+enum cam_isp_resource_type {
+ CAM_ISP_RESOURCE_UNINT,
+ CAM_ISP_RESOURCE_SRC,
+ CAM_ISP_RESOURCE_CID,
+ CAM_ISP_RESOURCE_PIX_PATH,
+ CAM_ISP_RESOURCE_VFE_IN,
+ CAM_ISP_RESOURCE_VFE_OUT,
+ CAM_ISP_RESOURCE_MAX,
+};
+
+enum cam_isp_hw_cmd_type {
+ CAM_ISP_HW_CMD_GET_CHANGE_BASE,
+ CAM_ISP_HW_CMD_GET_BUF_UPDATE,
+ CAM_ISP_HW_CMD_GET_REG_UPDATE,
+ CAM_ISP_HW_CMD_GET_HFR_UPDATE,
+ CAM_ISP_HW_CMD_GET_SECURE_MODE,
+ CAM_ISP_HW_CMD_STRIPE_UPDATE,
+ CAM_ISP_HW_CMD_CLOCK_UPDATE,
+ CAM_ISP_HW_CMD_BW_UPDATE,
+ CAM_ISP_HW_CMD_BW_CONTROL,
+ CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
+ CAM_ISP_HW_CMD_GET_REG_DUMP,
+ CAM_ISP_HW_CMD_SOF_IRQ_DEBUG,
+ CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
+ CAM_ISP_HW_CMD_UBWC_UPDATE,
+ CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE,
+ CAM_ISP_HW_CMD_MAX,
+};
+
+/*
+ * struct cam_isp_resource_node:
+ *
+ * @Brief: Structure representing HW resource object
+ *
+ * @res_type: Resource Type
+ * @res_id: Unique resource ID within res_type objects
+ * for a particular HW
+ * @res_state: State of the resource
+ * @hw_intf: HW Interface of HW to which this resource
+ * belongs
+ * @res_priv: Private data of the resource
+ * @list: list_head node for this resource
+ * @cdm_ops: CDM operation functions
+ * @tasklet_info: Tasklet structure that will be used to
+ * schedule IRQ events related to this resource
+ * @irq_handle: handle returned on subscribing for IRQ event
+ * @rdi_only_ctx: resource belong to rdi only context or not
+ * @init: function pointer to init the HW resource
+ * @deinit: function pointer to deinit the HW resource
+ * @start: function pointer to start the HW resource
+ * @stop: function pointer to stop the HW resource
+ * @process_cmd: function pointer for processing commands
+ * specific to the resource
+ * @top_half_handler: Top Half handler function
+ * @bottom_half_handler: Bottom Half handler function
+ */
+struct cam_isp_resource_node {
+ enum cam_isp_resource_type res_type;
+ uint32_t res_id;
+ enum cam_isp_resource_state res_state;
+ struct cam_hw_intf *hw_intf;
+ void *res_priv;
+ struct list_head list;
+ void *cdm_ops;
+ void *tasklet_info;
+ int irq_handle;
+ int rdi_only_ctx;
+
+ int (*init)(struct cam_isp_resource_node *rsrc_node,
+ void *init_args, uint32_t arg_size);
+ int (*deinit)(struct cam_isp_resource_node *rsrc_node,
+ void *deinit_args, uint32_t arg_size);
+ int (*start)(struct cam_isp_resource_node *rsrc_node);
+ int (*stop)(struct cam_isp_resource_node *rsrc_node);
+ int (*process_cmd)(struct cam_isp_resource_node *rsrc_node,
+ uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+ CAM_IRQ_HANDLER_TOP_HALF top_half_handler;
+ CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler;
+};
+
+/*
+ * struct cam_isp_hw_cmd_buf_update:
+ *
+ * @Brief: Contain the new created command buffer information
+ *
+ * @cmd_buf_addr: Command buffer to store the change base command
+ * @size: Size of the buffer in bytes
+ * @used_bytes: Consumed bytes in the command buffer
+ *
+ */
+struct cam_isp_hw_cmd_buf_update {
+ uint32_t *cmd_buf_addr;
+ uint32_t size;
+ uint32_t used_bytes;
+};
+
+/*
+ * struct cam_isp_hw_get_wm_update:
+ *
+ * @Brief: Get cmd buffer for WM updates.
+ *
+ * @ image_buf: image buffer address array
+ * @ num_buf: Number of buffers in the image_buf array
+ * @ io_cfg: IO buffer config information sent from UMD
+ *
+ */
+struct cam_isp_hw_get_wm_update {
+ uint64_t *image_buf;
+ uint32_t num_buf;
+ struct cam_buf_io_cfg *io_cfg;
+};
+
+/*
+ * struct cam_isp_hw_get_cmd_update:
+ *
+ * @Brief: Get cmd buffer update for different CMD types
+ *
+ * @res: Resource node
+ * @cmd_type: Command type for which to get update
+ * @cmd: Command buffer information
+ *
+ */
+struct cam_isp_hw_get_cmd_update {
+ struct cam_isp_resource_node *res;
+ enum cam_isp_hw_cmd_type cmd_type;
+ struct cam_isp_hw_cmd_buf_update cmd;
+ union {
+ void *data;
+ struct cam_isp_hw_get_wm_update *wm_update;
+ struct cam_isp_port_hfr_config *hfr_update;
+ struct cam_isp_clock_config *clock_update;
+ struct cam_isp_bw_config *bw_update;
+ struct cam_ubwc_plane_cfg_v1 *ubwc_update;
+ };
+};
+
+/*
+ * struct cam_isp_hw_dual_isp_update_args:
+ *
+ * @Brief: update the dual isp striping configuration.
+ *
+ * @ split_id:         split id to inform left or right
+ * @ res: resource node
+ * @ dual_cfg: dual isp configuration
+ *
+ */
+struct cam_isp_hw_dual_isp_update_args {
+ enum cam_isp_hw_split_id split_id;
+ struct cam_isp_resource_node *res;
+ struct cam_isp_dual_config *dual_cfg;
+};
+#endif /* _CAM_ISP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
new file mode 100644
index 000000000000..b957d6913f77
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -0,0 +1,302 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_HW_INTF_H_
+#define _CAM_VFE_HW_INTF_H_
+
+#include "cam_isp_hw.h"
+
+#define CAM_VFE_HW_NUM_MAX 4
+
+#define VFE_CORE_BASE_IDX 0
+/*
+ * VBIF and BUS do not exist on same HW.
+ * Hence both can be 1 below.
+ */
+#define VFE_VBIF_BASE_IDX 1
+#define VFE_BUS_BASE_IDX 1
+
+enum cam_isp_hw_vfe_in_mux {
+ CAM_ISP_HW_VFE_IN_CAMIF = 0,
+ CAM_ISP_HW_VFE_IN_TESTGEN = 1,
+ CAM_ISP_HW_VFE_IN_BUS_RD = 2,
+ CAM_ISP_HW_VFE_IN_RDI0 = 3,
+ CAM_ISP_HW_VFE_IN_RDI1 = 4,
+ CAM_ISP_HW_VFE_IN_RDI2 = 5,
+ CAM_ISP_HW_VFE_IN_RDI3 = 6,
+ CAM_ISP_HW_VFE_IN_CAMIF_LITE = 7,
+ CAM_ISP_HW_VFE_IN_MAX,
+};
+
+enum cam_isp_hw_vfe_core {
+ CAM_ISP_HW_VFE_CORE_0,
+ CAM_ISP_HW_VFE_CORE_1,
+ CAM_ISP_HW_VFE_CORE_2,
+ CAM_ISP_HW_VFE_CORE_3,
+ CAM_ISP_HW_VFE_CORE_MAX,
+};
+
+enum cam_vfe_hw_irq_status {
+ CAM_VFE_IRQ_STATUS_ERR_COMP = -3,
+ CAM_VFE_IRQ_STATUS_COMP_OWRT = -2,
+ CAM_VFE_IRQ_STATUS_ERR = -1,
+ CAM_VFE_IRQ_STATUS_SUCCESS = 0,
+ CAM_VFE_IRQ_STATUS_OVERFLOW = 1,
+ CAM_VFE_IRQ_STATUS_P2I_ERROR = 2,
+ CAM_VFE_IRQ_STATUS_VIOLATION = 3,
+ CAM_VFE_IRQ_STATUS_MAX,
+};
+
+enum cam_vfe_hw_irq_regs {
+ CAM_IFE_IRQ_CAMIF_REG_STATUS0 = 0,
+ CAM_IFE_IRQ_CAMIF_REG_STATUS1 = 1,
+ CAM_IFE_IRQ_VIOLATION_STATUS = 2,
+ CAM_IFE_IRQ_REGISTERS_MAX,
+};
+
+enum cam_vfe_bus_irq_regs {
+ CAM_IFE_IRQ_BUS_REG_STATUS0 = 0,
+ CAM_IFE_IRQ_BUS_REG_STATUS1 = 1,
+ CAM_IFE_IRQ_BUS_REG_STATUS2 = 2,
+ CAM_IFE_IRQ_BUS_REG_COMP_ERR = 3,
+ CAM_IFE_IRQ_BUS_REG_COMP_OWRT = 4,
+ CAM_IFE_IRQ_BUS_DUAL_COMP_ERR = 5,
+ CAM_IFE_IRQ_BUS_DUAL_COMP_OWRT = 6,
+ CAM_IFE_BUS_IRQ_REGISTERS_MAX,
+};
+
+enum cam_vfe_reset_type {
+ CAM_VFE_HW_RESET_HW_AND_REG,
+ CAM_VFE_HW_RESET_HW,
+ CAM_VFE_HW_RESET_MAX,
+};
+
+/*
+ * struct cam_vfe_hw_get_hw_cap:
+ *
+ * @max_width: Max width supported by HW
+ * @max_height: Max height supported by HW
+ * @max_pixel_num: Max Pixel channels available
+ * @max_rdi_num: Max Raw channels available
+ */
+struct cam_vfe_hw_get_hw_cap {
+ uint32_t max_width;
+ uint32_t max_height;
+ uint32_t max_pixel_num;
+ uint32_t max_rdi_num;
+};
+
+/*
+ * struct cam_vfe_hw_vfe_out_acquire_args:
+ *
+ * @rsrc_node: Pointer to Resource Node object, filled if acquire
+ * is successful
+ * @out_port_info: Output Port details to acquire
+ * @unique_id: Unique Identity of Context to associate with this
+ * resource. Used for composite grouping of multiple
+ * resources in the same context
+ * @is_dual: Dual VFE or not
+ * @split_id: In case of Dual VFE, this is Left or Right.
+ * (Default is Left if Single VFE)
+ * @is_master: In case of Dual VFE, this is Master or Slave.
+ * (Default is Master in case of Single VFE)
+ * @dual_slave_core: If Master and Slave exists, HW Index of Slave
+ * @cdm_ops: CDM operations
+ * @ctx: Context data
+ */
+struct cam_vfe_hw_vfe_out_acquire_args {
+ struct cam_isp_resource_node *rsrc_node;
+ struct cam_isp_out_port_info *out_port_info;
+ uint32_t unique_id;
+ uint32_t is_dual;
+ enum cam_isp_hw_split_id split_id;
+ uint32_t is_master;
+ uint32_t dual_slave_core;
+ struct cam_cdm_utils_ops *cdm_ops;
+ void *ctx;
+};
+
+/*
+ * struct cam_vfe_hw_vfe_in_acquire_args:
+ *
+ * @rsrc_node: Pointer to Resource Node object, filled if acquire
+ * is successful
+ * @res_id: Resource ID of resource to acquire if specific,
+ * else CAM_ISP_HW_VFE_IN_MAX
+ * @cdm_ops: CDM operations
+ * @sync_mode: In case of Dual VFE, this is Master or Slave.
+ * (Default is Master in case of Single VFE)
+ * @in_port: Input port details to acquire
+ */
+struct cam_vfe_hw_vfe_in_acquire_args {
+ struct cam_isp_resource_node *rsrc_node;
+ uint32_t res_id;
+ void *cdm_ops;
+ enum cam_isp_hw_sync_mode sync_mode;
+ struct cam_isp_in_port_info *in_port;
+};
+
+/*
+ * struct cam_vfe_acquire_args:
+ *
+ * @rsrc_type: Type of Resource (OUT/IN) to acquire
+ * @tasklet: Tasklet to associate with this resource. This is
+ * used to schedule bottom of IRQ events associated
+ * with this resource.
+ * @vfe_out: Acquire args for VFE_OUT
+ * @vfe_in: Acquire args for VFE_IN
+ */
+struct cam_vfe_acquire_args {
+ enum cam_isp_resource_type rsrc_type;
+ void *tasklet;
+ union {
+ struct cam_vfe_hw_vfe_out_acquire_args vfe_out;
+ struct cam_vfe_hw_vfe_in_acquire_args vfe_in;
+ };
+};
+
+/*
+ * struct cam_vfe_clock_update_args:
+ *
+ * @node_res: Resource to get the time stamp
+ * @clk_rate: Clock rate requested
+ */
+struct cam_vfe_clock_update_args {
+ struct cam_isp_resource_node *node_res;
+ uint64_t clk_rate;
+};
+
+/*
+ * struct cam_vfe_bw_update_args:
+ *
+ * @node_res: Resource to get the time stamp
+ * @camnoc_bw_bytes: Bandwidth vote request for CAMNOC
+ * @external_bw_bytes: Bandwidth vote request from CAMNOC
+ * out to the rest of the path-to-DDR
+ */
+struct cam_vfe_bw_update_args {
+ struct cam_isp_resource_node *node_res;
+ uint64_t camnoc_bw_bytes;
+ uint64_t external_bw_bytes;
+};
+
+enum cam_vfe_bw_control_action {
+ CAM_VFE_BW_CONTROL_EXCLUDE = 0,
+ CAM_VFE_BW_CONTROL_INCLUDE = 1
+};
+
+/*
+ * struct cam_vfe_bw_control_args:
+ *
+ * @node_res: Resource to get the time stamp
+ * @action: Bandwidth control action
+ */
+struct cam_vfe_bw_control_args {
+ struct cam_isp_resource_node *node_res;
+ enum cam_vfe_bw_control_action action;
+};
+
+/*
+ * struct cam_vfe_top_irq_evt_payload:
+ *
+ * @Brief: This structure is used to save payload for IRQ
+ * related to VFE_TOP resources
+ *
+ * @list: list_head node for the payload
+ * @core_index: Index of VFE HW that generated this IRQ event
+ * @core_info: Private data of handler in bottom half context
+ * @evt_id: IRQ event
+ * @irq_reg_val: IRQ and Error register values, read when IRQ was
+ * handled
+ * @error_type: Identify different errors
+ * @ts: Timestamp
+ */
+struct cam_vfe_top_irq_evt_payload {
+ struct list_head list;
+ uint32_t core_index;
+ void *core_info;
+ uint32_t evt_id;
+ uint32_t irq_reg_val[CAM_IFE_IRQ_REGISTERS_MAX];
+ uint32_t error_type;
+ struct cam_isp_timestamp ts;
+};
+
+/*
+ * struct cam_vfe_bus_irq_evt_payload:
+ *
+ * @Brief: This structure is used to save payload for IRQ
+ * related to VFE_BUS resources
+ *
+ * @list: list_head node for the payload
+ * @core_index: Index of VFE HW that generated this IRQ event
+ * @debug_status_0: Value of debug status_0 register at time of IRQ
+ * @evt_id: IRQ event
+ * @irq_reg_val: IRQ and Error register values, read when IRQ was
+ * handled
+ * @error_type: Identify different errors
+ * @ts: Timestamp
+ * @ctx: Context data received during acquire
+ */
+struct cam_vfe_bus_irq_evt_payload {
+ struct list_head list;
+ uint32_t core_index;
+ uint32_t debug_status_0;
+ uint32_t evt_id;
+ uint32_t irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX];
+ uint32_t error_type;
+ struct cam_isp_timestamp ts;
+ void *ctx;
+};
+
+/*
+ * struct cam_vfe_irq_handler_priv:
+ *
+ * @Brief: This structure is used as private data to
+ * register with IRQ controller. It has information
+ * needed by top half and bottom half.
+ *
+ * @core_index: Index of VFE HW that generated this IRQ event
+ * @core_info: Private data of handler in bottom half context
+ * @mem_base: Mapped base address of the register space
+ * @reset_complete: Completion structure to be signaled if Reset IRQ
+ * is Set
+ */
+struct cam_vfe_irq_handler_priv {
+ uint32_t core_index;
+ void *core_info;
+ void __iomem *mem_base;
+ struct completion *reset_complete;
+};
+
+/*
+ * cam_vfe_hw_init()
+ *
+ * @Brief: Initialize VFE HW device
+ *
+ * @vfe_hw: vfe_hw interface to fill in and return on
+ * successful initialization
+ * @hw_idx: Index of VFE HW
+ */
+int cam_vfe_hw_init(struct cam_hw_intf **vfe_hw, uint32_t hw_idx);
+
+/*
+ * cam_vfe_put_evt_payload()
+ *
+ * @Brief: Put the evt payload back to free list
+ *
+ * @core_info: VFE HW core_info
+ * @evt_payload: Event payload data
+ */
+int cam_vfe_put_evt_payload(void *core_info,
+ struct cam_vfe_top_irq_evt_payload **evt_payload);
+
+#endif /* _CAM_VFE_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/Makefile
new file mode 100644
index 000000000000..e9a20c8a2788
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/Makefile
@@ -0,0 +1,15 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_soc.o cam_vfe_dev.o cam_vfe_core.o
+obj-$(CONFIG_SPECTRA_CAMERA) += vfe_bus/ vfe_top/ vfe17x/ \ No newline at end of file
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
new file mode 100644
index 000000000000..5e6bb2012fff
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -0,0 +1,829 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include "cam_tasklet_util.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_vfe_soc.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_bus.h"
+#include "cam_vfe_top.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_debug_util.h"
+
+static const char drv_name[] = "vfe";
+static uint32_t irq_reg_offset[CAM_IFE_IRQ_REGISTERS_MAX] = {
+ 0x0000006C,
+ 0x00000070,
+ 0x0000007C,
+};
+
+static uint32_t camif_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+ 0x00000017,
+ 0x00000000,
+};
+
+static uint32_t camif_irq_err_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+ 0x0003FC00,
+ 0xEFFF7EBC,
+};
+
+static uint32_t rdi_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+ 0x780001e0,
+ 0x00000000,
+};
+
+static uint32_t top_reset_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+ 0x80000000,
+ 0x00000000,
+};
+
+/*
+ * cam_vfe_get_evt_payload()
+ *
+ * @brief: Take one IRQ event payload off the core's free pool.
+ *         NOTE(review): uses plain spin_lock while the counterpart
+ *         cam_vfe_put_evt_payload() uses spin_lock_irqsave — assumes
+ *         this is only ever called from IRQ top halves where interrupts
+ *         are already disabled; confirm against the IRQ controller.
+ *
+ * @core_info:   VFE HW core info owning the payload pool
+ * @evt_payload: Out param; set to a free payload, or NULL on failure
+ *
+ * @Return: 0 on success, -ENODEV when the pool is exhausted
+ */
+static int cam_vfe_get_evt_payload(struct cam_vfe_hw_core_info *core_info,
+	struct cam_vfe_top_irq_evt_payload **evt_payload)
+{
+	spin_lock(&core_info->spin_lock);
+	if (list_empty(&core_info->free_payload_list)) {
+		*evt_payload = NULL;
+		spin_unlock(&core_info->spin_lock);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload, core info 0x%x\n",
+			core_info->cpas_handle);
+		return -ENODEV;
+	}
+
+	*evt_payload = list_first_entry(&core_info->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	spin_unlock(&core_info->spin_lock);
+
+	return 0;
+}
+
+/*
+ * cam_vfe_put_evt_payload()
+ *
+ * @brief: Return an event payload to the core's free pool. Safe to call
+ *         from any context (uses irqsave locking). Clears the caller's
+ *         pointer so it cannot be reused after the put.
+ *
+ * @core_info:   VFE HW core info owning the payload pool
+ * @evt_payload: Address of the payload pointer to return; set to NULL
+ *
+ * @Return: 0 on success, -EINVAL on bad arguments
+ */
+int cam_vfe_put_evt_payload(void *core_info,
+	struct cam_vfe_top_irq_evt_payload **evt_payload)
+{
+	struct cam_vfe_hw_core_info *vfe_core_info = core_info;
+	unsigned long flags;
+
+	if (!core_info) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	/* Guard the pointer itself before dereferencing it */
+	if (!evt_payload || *evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&vfe_core_info->spin_lock, flags);
+	(*evt_payload)->error_type = 0;
+	list_add_tail(&(*evt_payload)->list, &vfe_core_info->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&vfe_core_info->spin_lock, flags);
+
+	return 0;
+}
+
+/*
+ * cam_vfe_get_hw_caps()
+ *
+ * @brief: Query HW capabilities; delegates to the top module's
+ *         get_hw_caps op when one is registered.
+ *
+ * @Return: 0 (delegate's return value is not propagated)
+ */
+int cam_vfe_get_hw_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *vfe_dev = hw_priv;
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	int rc = 0;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_dev->core_info;
+
+	if (core_info->vfe_top->hw_ops.get_hw_caps)
+		core_info->vfe_top->hw_ops.get_hw_caps(
+			core_info->vfe_top->top_priv,
+			get_hw_cap_args, arg_size);
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+/*
+ * cam_vfe_reset_irq_top_half()
+ *
+ * @brief: Top-half subscribed only while a reset is pending. When the
+ *         reset-done bit (bit 31 of status_0) fires, clear all pending
+ *         IRQs and wake the waiter blocked in cam_vfe_reset().
+ *
+ * @Return: 0 when reset-done was observed, -EINVAL otherwise
+ */
+int cam_vfe_reset_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	int32_t rc = -EINVAL;
+	struct cam_vfe_irq_handler_priv *handler_priv;
+
+	handler_priv = th_payload->handler_priv;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+
+	/* 1U: shifting a signed 1 into bit 31 is undefined behavior */
+	if (th_payload->evt_status_arr[0] & (1U << 31)) {
+		/*
+		 * Clear All IRQs to avoid spurious IRQs immediately
+		 * after Reset Done.
+		 * 0x64/0x68 are the IRQ clear registers, 0x58 the clear
+		 * command register — TODO confirm against the register map.
+		 */
+		cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x64);
+		cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x68);
+		cam_io_w(0x1, handler_priv->mem_base + 0x58);
+		CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
+		complete(handler_priv->reset_complete);
+
+		rc = 0;
+	}
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+/*
+ * cam_vfe_irq_err_top_half()
+ *
+ * @brief: Top-half for error IRQs. On any status_1 bit or an error bit
+ *         in status_0, it disables/masks further IRQs from this HW to
+ *         stop an IRQ storm, then queues an event payload for the
+ *         bottom-half tasklet.
+ *
+ * @Return: 0 on success, negative when no free payload is available
+ */
+static int cam_vfe_irq_err_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	int32_t rc;
+	int i;
+	struct cam_vfe_irq_handler_priv *handler_priv;
+	struct cam_vfe_top_irq_evt_payload *evt_payload;
+	struct cam_vfe_hw_core_info *core_info;
+	bool error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
+
+	handler_priv = th_payload->handler_priv;
+	core_info = handler_priv->core_info;
+	/*
+	 * need to handle overflow condition here, otherwise irq storm
+	 * will block everything
+	 */
+	if (th_payload->evt_status_arr[1] ||
+		(th_payload->evt_status_arr[0] & camif_irq_err_reg_mask[0])) {
+		CAM_ERR(CAM_ISP,
+			"Encountered Error: vfe:%d: Irq_status0=0x%x Status1=0x%x",
+			handler_priv->core_index, th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		CAM_ERR(CAM_ISP,
+			"Stopping further IRQ processing from this HW index=%d",
+			handler_priv->core_index);
+		cam_irq_controller_disable_irq(core_info->vfe_irq_controller,
+			core_info->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			core_info->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->core_index = handler_priv->core_index;
+	evt_payload->core_info = handler_priv->core_info;
+	evt_payload->evt_id = evt_id;
+
+	/* Status registers already latched by the IRQ controller */
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	/* Read the remaining registers (violation status etc.) from HW */
+	for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
+		evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
+			irq_reg_offset[i]);
+	}
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = %x",
+			evt_payload->irq_reg_val[2]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
+/*
+ * cam_vfe_init_hw()
+ *
+ * @brief: Power up the VFE. Reference-counted via open_count: only the
+ *         first opener enables SOC resources, resets the HW and
+ *         initializes the bus/top sub-modules.
+ *
+ * @hw_priv:      cam_hw_info pointer for this VFE
+ * @init_hw_args: Optional cam_isp_resource_node whose init() hook runs
+ * @arg_size:     Unused here
+ *
+ * @Return: 0 on success, negative errno on failure
+ */
+int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *vfe_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	struct cam_isp_resource_node *isp_res = NULL;
+	int rc = 0;
+	uint32_t reset_core_args =
+		CAM_VFE_HW_RESET_HW_AND_REG;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	vfe_hw->open_count++;
+	if (vfe_hw->open_count > 1) {
+		mutex_unlock(&vfe_hw->hw_mutex);
+		CAM_DBG(CAM_ISP, "VFE has already been initialized cnt %d",
+			vfe_hw->open_count);
+		return 0;
+	}
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	/* Turn ON Regulators, Clocks and other SOC resources */
+	rc = cam_vfe_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Enable SOC failed");
+		rc = -EFAULT;
+		goto decrement_open_cnt;
+	}
+
+	isp_res = (struct cam_isp_resource_node *)init_hw_args;
+	if (isp_res && isp_res->init) {
+		rc = isp_res->init(isp_res, NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "init Failed rc=%d", rc);
+			goto disable_soc;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Enable soc done");
+
+	/* Do HW Reset */
+	rc = cam_vfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t));
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc);
+		goto deinit_vfe_res;
+	}
+
+	rc = core_info->vfe_bus->hw_ops.init(core_info->vfe_bus->bus_priv,
+		NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Bus HW init Failed rc=%d", rc);
+		goto deinit_vfe_res;
+	}
+
+	rc = core_info->vfe_top->hw_ops.init(core_info->vfe_top->top_priv,
+		NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Top HW init Failed rc=%d", rc);
+		goto deinit_vfe_res;
+	}
+
+	vfe_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	return rc;
+
+deinit_vfe_res: /* was misspelled "deinint_vfe_res" */
+	if (isp_res && isp_res->deinit)
+		isp_res->deinit(isp_res, NULL, 0);
+disable_soc:
+	cam_vfe_disable_soc_resources(soc_info);
+decrement_open_cnt:
+	mutex_lock(&vfe_hw->hw_mutex);
+	vfe_hw->open_count--;
+	mutex_unlock(&vfe_hw->hw_mutex);
+	return rc;
+}
+
+/*
+ * cam_vfe_deinit_hw()
+ *
+ * @brief: Power down the VFE. Reference-counted counterpart of
+ *         cam_vfe_init_hw(): only the last closer deinits the bus,
+ *         resets the HW and releases SOC resources.
+ *
+ * @Return: 0 on success, negative errno on failure
+ */
+int cam_vfe_deinit_hw(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *vfe_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	struct cam_isp_resource_node *isp_res = NULL;
+	int rc = 0;
+	uint32_t reset_core_args =
+		CAM_VFE_HW_RESET_HW_AND_REG;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (!vfe_hw->open_count) {
+		mutex_unlock(&vfe_hw->hw_mutex);
+		CAM_ERR(CAM_ISP, "Error! Unbalanced deinit");
+		return -EFAULT;
+	}
+	vfe_hw->open_count--;
+	if (vfe_hw->open_count) {
+		mutex_unlock(&vfe_hw->hw_mutex);
+		CAM_DBG(CAM_ISP, "open_cnt non-zero =%d", vfe_hw->open_count);
+		return 0;
+	}
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	/* Best-effort teardown: errors are logged but do not abort */
+	rc = core_info->vfe_bus->hw_ops.deinit(core_info->vfe_bus->bus_priv,
+		NULL, 0);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Bus HW deinit Failed rc=%d", rc);
+
+	isp_res = (struct cam_isp_resource_node *)deinit_hw_args;
+	if (isp_res && isp_res->deinit) {
+		rc = isp_res->deinit(isp_res, NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_ISP, "deinit failed");
+	}
+
+	rc = cam_vfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t));
+
+	/* Turn OFF Regulators, Clocks and other SOC resources */
+	CAM_DBG(CAM_ISP, "Disable SOC resource");
+	rc = cam_vfe_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Disable SOC failed");
+
+	vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+/*
+ * cam_vfe_reset()
+ *
+ * @brief: Issue a HW reset via the top module and block until the
+ *         reset-done IRQ completes vfe_hw->hw_complete (or 500ms pass).
+ *         Subscribes a temporary top-half for the reset-done bit and
+ *         unsubscribes it before returning.
+ *
+ * @Return: remaining jiffies (>0) on success, 0 on timeout,
+ *          negative errno on setup failure
+ */
+int cam_vfe_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info *vfe_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	int rc;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	core_info->irq_payload.core_index = soc_info->index;
+	core_info->irq_payload.mem_base =
+		vfe_hw->soc_info.reg_map[VFE_CORE_BASE_IDX].mem_base;
+	core_info->irq_payload.core_info = core_info;
+	core_info->irq_payload.reset_complete = &vfe_hw->hw_complete;
+
+	core_info->irq_handle = cam_irq_controller_subscribe_irq(
+		core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_0,
+		top_reset_irq_reg_mask, &core_info->irq_payload,
+		cam_vfe_reset_irq_top_half, NULL, NULL, NULL);
+	if (core_info->irq_handle < 0) {
+		CAM_ERR(CAM_ISP, "subscribe irq controller failed");
+		return -EFAULT;
+	}
+
+	reinit_completion(&vfe_hw->hw_complete);
+
+	CAM_DBG(CAM_ISP, "calling RESET on vfe %d", soc_info->index);
+	core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv,
+		reset_core_args, arg_size);
+	CAM_DBG(CAM_ISP, "waiting for vfe reset complete");
+	/*
+	 * Wait for Completion or Timeout of 500ms.
+	 * wait_for_completion_timeout() takes jiffies, not milliseconds;
+	 * a raw 500 would be HZ-dependent (5s at HZ=100).
+	 */
+	rc = wait_for_completion_timeout(&vfe_hw->hw_complete,
+		msecs_to_jiffies(500));
+	if (!rc)
+		CAM_ERR(CAM_ISP, "Error! Reset Timeout");
+
+	CAM_DBG(CAM_ISP, "reset complete done (%d)", rc);
+
+	rc = cam_irq_controller_unsubscribe_irq(
+		core_info->vfe_irq_controller, core_info->irq_handle);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Error! Unsubscribe failed");
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+/*
+ * cam_isp_hw_get_timestamp()
+ *
+ * @brief: Fill @time_stamp with the current monotonic boottime,
+ *         converted from ns to us resolution.
+ */
+void cam_isp_hw_get_timestamp(struct cam_isp_timestamp *time_stamp)
+{
+	struct timespec ts;
+
+	get_monotonic_boottime(&ts);
+	time_stamp->mono_time.tv_sec    = ts.tv_sec;
+	time_stamp->mono_time.tv_usec   = ts.tv_nsec/1000;
+}
+
+/*
+ * cam_vfe_irq_top_half()
+ *
+ * @brief: Top-half for normal (CAMIF/RDI) IRQs. Grabs a free event
+ *         payload, snapshots the status registers and timestamp, and
+ *         hands the payload to the bottom-half tasklet via
+ *         th_payload->evt_payload_priv.
+ *
+ * @Return: 0 on success, negative when no free payload is available
+ */
+static int cam_vfe_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	int32_t rc;
+	int i;
+	struct cam_vfe_irq_handler_priv *handler_priv;
+	struct cam_vfe_top_irq_evt_payload *evt_payload;
+	struct cam_vfe_hw_core_info *core_info;
+
+	handler_priv = th_payload->handler_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+	rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	core_info = handler_priv->core_info;
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->core_index = handler_priv->core_index;
+	evt_payload->core_info = handler_priv->core_info;
+	evt_payload->evt_id = evt_id;
+
+	/* Status registers already latched by the IRQ controller */
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	/* Read the remaining registers (violation status etc.) from HW */
+	for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
+		evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
+			irq_reg_offset[i]);
+	}
+	CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+/*
+ * cam_vfe_reserve()
+ *
+ * @brief: Reserve an input (top) or output (bus) resource on this VFE,
+ *         dispatching on acquire->rsrc_type.
+ *
+ * @Return: 0 on success, -EINVAL on bad args, -ENODEV on unknown type
+ */
+int cam_vfe_reserve(void *hw_priv, void *reserve_args, uint32_t arg_size)
+{
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	struct cam_hw_info *vfe_hw = hw_priv;
+	struct cam_vfe_acquire_args *acquire;
+	int rc = -ENODEV;
+
+	if (!hw_priv || !reserve_args || (arg_size !=
+		sizeof(struct cam_vfe_acquire_args))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	acquire = (struct cam_vfe_acquire_args *)reserve_args;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_IN)
+		rc = core_info->vfe_top->hw_ops.reserve(
+			core_info->vfe_top->top_priv,
+			acquire,
+			sizeof(*acquire));
+	else if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_OUT)
+		rc = core_info->vfe_bus->hw_ops.reserve(
+			core_info->vfe_bus->bus_priv, acquire,
+			sizeof(*acquire));
+	else
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", acquire->rsrc_type);
+
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+
+/*
+ * cam_vfe_release()
+ *
+ * @brief: Release a previously reserved input (top) or output (bus)
+ *         resource, dispatching on isp_res->res_type.
+ *
+ * @Return: 0 on success, -EINVAL on bad args, -ENODEV on unknown type
+ */
+int cam_vfe_release(void *hw_priv, void *release_args, uint32_t arg_size)
+{
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	struct cam_hw_info *vfe_hw = hw_priv;
+	struct cam_isp_resource_node *isp_res;
+	int rc = -ENODEV;
+
+	if (!hw_priv || !release_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node *) release_args;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN)
+		rc = core_info->vfe_top->hw_ops.release(
+			core_info->vfe_top->top_priv, isp_res,
+			sizeof(*isp_res));
+	else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT)
+		rc = core_info->vfe_bus->hw_ops.release(
+			core_info->vfe_bus->bus_priv, isp_res,
+			sizeof(*isp_res));
+	else
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
+
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+
+/*
+ * cam_vfe_start()
+ *
+ * @brief: Start a reserved resource. For VFE_IN resources this first
+ *         subscribes the CAMIF or RDI top-half (valid handles from the
+ *         IRQ controller are >= 1) and then starts the top module; for
+ *         VFE_OUT it starts the bus. Also lazily subscribes the shared
+ *         error-IRQ handler the first time any resource starts.
+ *
+ * @Return: 0 on success, negative errno on failure
+ */
+int cam_vfe_start(void *hw_priv, void *start_args, uint32_t arg_size)
+{
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	struct cam_hw_info *vfe_hw = hw_priv;
+	struct cam_isp_resource_node *isp_res;
+	int rc = 0;
+
+	if (!hw_priv || !start_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node *)start_args;
+	core_info->tasklet_info = isp_res->tasklet_info;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
+		if (isp_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
+			isp_res->irq_handle =
+				cam_irq_controller_subscribe_irq(
+					core_info->vfe_irq_controller,
+					CAM_IRQ_PRIORITY_1,
+					camif_irq_reg_mask,
+					&core_info->irq_payload,
+					cam_vfe_irq_top_half,
+					cam_ife_mgr_do_tasklet,
+					isp_res->tasklet_info,
+					&tasklet_bh_api);
+			/* NOTE(review): a failed (negative) handle is kept
+			 * in isp_res->irq_handle — confirm stop() copes */
+			if (isp_res->irq_handle < 1)
+				rc = -ENOMEM;
+		} else if (isp_res->rdi_only_ctx) {
+			isp_res->irq_handle =
+				cam_irq_controller_subscribe_irq(
+					core_info->vfe_irq_controller,
+					CAM_IRQ_PRIORITY_1,
+					rdi_irq_reg_mask,
+					&core_info->irq_payload,
+					cam_vfe_irq_top_half,
+					cam_ife_mgr_do_tasklet,
+					isp_res->tasklet_info,
+					&tasklet_bh_api);
+			if (isp_res->irq_handle < 1)
+				rc = -ENOMEM;
+		}
+
+		if (rc == 0) {
+			rc = core_info->vfe_top->hw_ops.start(
+				core_info->vfe_top->top_priv, isp_res,
+				sizeof(struct cam_isp_resource_node));
+			if (rc)
+				CAM_ERR(CAM_ISP, "Start failed. type:%d",
+					isp_res->res_type);
+		} else {
+			CAM_ERR(CAM_ISP,
+				"Error! subscribe irq controller failed");
+		}
+	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
+		rc = core_info->vfe_bus->hw_ops.start(isp_res, NULL, 0);
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
+		rc = -EFAULT;
+	}
+
+	/* One shared error handler per core, subscribed on first start */
+	if (!core_info->irq_err_handle) {
+		core_info->irq_err_handle =
+			cam_irq_controller_subscribe_irq(
+				core_info->vfe_irq_controller,
+				CAM_IRQ_PRIORITY_0,
+				camif_irq_err_reg_mask,
+				&core_info->irq_payload,
+				cam_vfe_irq_err_top_half,
+				cam_ife_mgr_do_tasklet,
+				core_info->tasklet_info,
+				&tasklet_bh_api);
+		if (core_info->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error handle subscribe failure");
+			rc = -ENOMEM;
+			core_info->irq_err_handle = 0;
+		}
+	}
+
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+/*
+ * cam_vfe_stop()
+ *
+ * @brief: Stop a started resource, unsubscribing its IRQ handler, and
+ *         drop the shared error-IRQ handler if still subscribed.
+ *         NOTE(review): unsubscribes isp_res->irq_handle even for
+ *         VFE_IN paths where start() never subscribed (non-CAMIF,
+ *         !rdi_only_ctx) — assumed harmless for a 0 handle; confirm.
+ *
+ * @Return: 0 on success, negative errno on failure
+ */
+int cam_vfe_stop(void *hw_priv, void *stop_args, uint32_t arg_size)
+{
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	struct cam_hw_info *vfe_hw = hw_priv;
+	struct cam_isp_resource_node *isp_res;
+	int rc = -EINVAL;
+
+	if (!hw_priv || !stop_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node *)stop_args;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
+		cam_irq_controller_unsubscribe_irq(
+			core_info->vfe_irq_controller, isp_res->irq_handle);
+		isp_res->irq_handle = 0;
+
+		rc = core_info->vfe_top->hw_ops.stop(
+			core_info->vfe_top->top_priv, isp_res,
+			sizeof(struct cam_isp_resource_node));
+	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
+		rc = core_info->vfe_bus->hw_ops.stop(isp_res, NULL, 0);
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
+	}
+
+	if (core_info->irq_err_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			core_info->vfe_irq_controller,
+			core_info->irq_err_handle);
+		core_info->irq_err_handle = 0;
+	}
+
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+/* Direct register read is not supported through this interface. */
+int cam_vfe_read(void *hw_priv, void *read_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/* Direct register write is not supported through this interface. */
+int cam_vfe_write(void *hw_priv, void *write_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/*
+ * cam_vfe_process_cmd()
+ *
+ * @brief: Dispatch a HW command to the top module (base/reg-update,
+ *         clock/BW commands) or to the bus module (buffer/HFR/stripe/
+ *         UBWC commands).
+ *
+ * @Return: 0 on success, -EINVAL on unknown command, or the delegate's
+ *          error code
+ */
+int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *vfe_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	struct cam_vfe_hw_info *hw_info = NULL;
+	int rc = 0;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	hw_info = core_info->vfe_hw_info;
+
+	switch (cmd_type) {
+	/* Top-module commands */
+	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+	case CAM_ISP_HW_CMD_CLOCK_UPDATE:
+	case CAM_ISP_HW_CMD_BW_UPDATE:
+	case CAM_ISP_HW_CMD_BW_CONTROL:
+		rc = core_info->vfe_top->hw_ops.process_cmd(
+			core_info->vfe_top->top_priv, cmd_type, cmd_args,
+			arg_size);
+		break;
+	/* Bus-module commands */
+	case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+	case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
+	case CAM_ISP_HW_CMD_STRIPE_UPDATE:
+	case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ:
+	case CAM_ISP_HW_CMD_UBWC_UPDATE:
+		rc = core_info->vfe_bus->hw_ops.process_cmd(
+			core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
+			arg_size);
+		break;
+
+	default:
+		CAM_ERR(CAM_ISP, "Invalid cmd type:%d", cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+/*
+ * cam_vfe_irq()
+ *
+ * @brief: Hard-IRQ entry point registered for this VFE; forwards to the
+ *         per-core IRQ controller which runs the subscribed top halves.
+ */
+irqreturn_t cam_vfe_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *vfe_hw;
+	struct cam_vfe_hw_core_info *core_info;
+
+	if (!data)
+		return IRQ_NONE;
+
+	vfe_hw = (struct cam_hw_info *)data;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	return cam_irq_controller_handle_irq(irq_num,
+		core_info->vfe_irq_controller);
+}
+
+/*
+ * cam_vfe_core_init()
+ *
+ * @brief: One-time core setup at probe: create the IRQ controller,
+ *         the top and bus sub-modules, and seed the event-payload free
+ *         list. Unwinds in reverse order on failure.
+ *
+ * @Return: 0 on success, negative errno on failure
+ */
+int cam_vfe_core_init(struct cam_vfe_hw_core_info *core_info,
+	struct cam_hw_soc_info *soc_info,
+	struct cam_hw_intf *hw_intf,
+	struct cam_vfe_hw_info *vfe_hw_info)
+{
+	int rc = -EINVAL;
+	int i;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	rc = cam_irq_controller_init(drv_name,
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX),
+		vfe_hw_info->irq_reg_info, &core_info->vfe_irq_controller);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
+		return rc;
+	}
+
+	rc = cam_vfe_top_init(vfe_hw_info->top_version,
+		soc_info, hw_intf, vfe_hw_info->top_hw_info,
+		&core_info->vfe_top);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! cam_vfe_top_init failed");
+		goto deinit_controller;
+	}
+
+	rc = cam_vfe_bus_init(vfe_hw_info->bus_version, soc_info, hw_intf,
+		vfe_hw_info->bus_hw_info, core_info->vfe_irq_controller,
+		&core_info->vfe_bus);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! cam_vfe_bus_init failed");
+		goto deinit_top;
+	}
+
+	/* All CAM_VFE_EVT_MAX payloads start on the free list */
+	INIT_LIST_HEAD(&core_info->free_payload_list);
+	for (i = 0; i < CAM_VFE_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
+		list_add_tail(&core_info->evt_payload[i].list,
+			&core_info->free_payload_list);
+	}
+
+	spin_lock_init(&core_info->spin_lock);
+
+	return rc;
+
+deinit_top:
+	cam_vfe_top_deinit(vfe_hw_info->top_version,
+		&core_info->vfe_top);
+
+deinit_controller:
+	cam_irq_controller_deinit(&core_info->vfe_irq_controller);
+
+	return rc;
+}
+
+/*
+ * cam_vfe_core_deinit()
+ *
+ * @brief: Counterpart of cam_vfe_core_init(): drain the payload lists
+ *         and tear down bus, top and the IRQ controller.
+ *         NOTE(review): the sub-module deinit calls run with the core
+ *         spinlock held and IRQs off — confirm none of them can sleep.
+ *
+ * @Return: last sub-deinit return code (errors are logged, not fatal)
+ */
+int cam_vfe_core_deinit(struct cam_vfe_hw_core_info *core_info,
+	struct cam_vfe_hw_info *vfe_hw_info)
+{
+	int rc = -EINVAL;
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&core_info->spin_lock, flags);
+
+	/* Detach every payload; the pool is owned by core_info itself */
+	INIT_LIST_HEAD(&core_info->free_payload_list);
+	for (i = 0; i < CAM_VFE_EVT_MAX; i++)
+		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
+
+	rc = cam_vfe_bus_deinit(vfe_hw_info->bus_version,
+		&core_info->vfe_bus);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Error cam_vfe_bus_deinit failed rc=%d", rc);
+
+	rc = cam_vfe_top_deinit(vfe_hw_info->top_version,
+		&core_info->vfe_top);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Error cam_vfe_top_deinit failed rc=%d", rc);
+
+	rc = cam_irq_controller_deinit(&core_info->vfe_irq_controller);
+	if (rc)
+		CAM_ERR(CAM_ISP,
+			"Error cam_irq_controller_deinit failed rc=%d", rc);
+
+	spin_unlock_irqrestore(&core_info->spin_lock, flags);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
new file mode 100644
index 000000000000..9cec56a15fec
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
@@ -0,0 +1,99 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_CORE_H_
+#define _CAM_VFE_CORE_H_
+
+#include <linux/spinlock.h>
+#include "cam_hw_intf.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_bus.h"
+#include "cam_vfe_hw_intf.h"
+
+struct cam_vfe_hw_info {
+ struct cam_irq_controller_reg_info *irq_reg_info;
+
+ uint32_t bus_version;
+ void *bus_hw_info;
+
+ uint32_t top_version;
+ void *top_hw_info;
+ uint32_t camif_version;
+ void *camif_reg;
+
+ uint32_t camif_lite_version;
+ void *camif_lite_reg;
+
+ uint32_t testgen_version;
+ void *testgen_reg;
+
+ uint32_t num_qos_settings;
+ struct cam_isp_reg_val_pair *qos_settings;
+
+ uint32_t num_ds_settings;
+ struct cam_isp_reg_val_pair *ds_settings;
+
+ uint32_t num_vbif_settings;
+ struct cam_isp_reg_val_pair *vbif_settings;
+};
+
+/* Size of the per-core pool of IRQ event payloads */
+#define CAM_VFE_EVT_MAX 256
+
+/* Per-core runtime state shared by the IRQ path and the hw_ops */
+struct cam_vfe_hw_core_info {
+	struct cam_vfe_hw_info             *vfe_hw_info;
+	void                               *vfe_irq_controller;
+	struct cam_vfe_top                 *vfe_top;
+	struct cam_vfe_bus                 *vfe_bus;
+	void                               *tasklet_info;
+	/* Payload pool; free entries live on free_payload_list */
+	struct cam_vfe_top_irq_evt_payload  evt_payload[CAM_VFE_EVT_MAX];
+	struct list_head                    free_payload_list;
+	struct cam_vfe_irq_handler_priv     irq_payload;
+	uint32_t                            cpas_handle;
+	int                                 irq_handle;
+	/* Shared error-IRQ subscription; 0 when not subscribed */
+	int                                 irq_err_handle;
+	/* Protects free_payload_list */
+	spinlock_t                          spin_lock;
+};
+
+int cam_vfe_get_hw_caps(void *device_priv,
+ void *get_hw_cap_args, uint32_t arg_size);
+int cam_vfe_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_vfe_deinit_hw(void *hw_priv,
+ void *deinit_hw_args, uint32_t arg_size);
+int cam_vfe_reset(void *device_priv,
+ void *reset_core_args, uint32_t arg_size);
+int cam_vfe_reserve(void *device_priv,
+ void *reserve_args, uint32_t arg_size);
+int cam_vfe_release(void *device_priv,
+ void *reserve_args, uint32_t arg_size);
+int cam_vfe_start(void *device_priv,
+ void *start_args, uint32_t arg_size);
+int cam_vfe_stop(void *device_priv,
+ void *stop_args, uint32_t arg_size);
+int cam_vfe_read(void *device_priv,
+ void *read_args, uint32_t arg_size);
+int cam_vfe_write(void *device_priv,
+ void *write_args, uint32_t arg_size);
+int cam_vfe_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_vfe_irq(int irq_num, void *data);
+
+int cam_vfe_core_init(struct cam_vfe_hw_core_info *core_info,
+ struct cam_hw_soc_info *soc_info,
+ struct cam_hw_intf *hw_intf,
+ struct cam_vfe_hw_info *vfe_hw_info);
+
+int cam_vfe_core_deinit(struct cam_vfe_hw_core_info *core_info,
+ struct cam_vfe_hw_info *vfe_hw_info);
+
+#endif /* _CAM_VFE_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
new file mode 100644
index 000000000000..66b647d3136b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
@@ -0,0 +1,204 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_vfe_dev.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_soc.h"
+#include "cam_debug_util.h"
+
+static struct cam_hw_intf *cam_vfe_hw_list[CAM_VFE_HW_NUM_MAX] = {0, 0, 0, 0};
+
+static char vfe_dev_name[8];
+
+/*
+ * cam_vfe_probe()
+ *
+ * @brief: Platform probe: allocate hw_intf/hw_info/core_info, wire the
+ *         hw_ops table, init SOC resources and the core, then run one
+ *         init/deinit cycle as a power-on sanity check.
+ *
+ * @Return: 0 on success, negative errno on failure
+ */
+int cam_vfe_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *vfe_hw = NULL;
+	struct cam_hw_intf *vfe_hw_intf = NULL;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	struct cam_vfe_hw_info *hw_info = NULL;
+	int rc = 0;
+
+	vfe_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!vfe_hw_intf) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &vfe_hw_intf->hw_idx);
+
+	vfe_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!vfe_hw) {
+		rc = -ENOMEM;
+		goto free_vfe_hw_intf;
+	}
+
+	/*
+	 * NOTE(review): vfe_dev_name is a single file-scope buffer; every
+	 * probed VFE instance points its dev_name at it, so all instances
+	 * end up showing the name of the last one probed — confirm whether
+	 * a per-device copy is needed.
+	 */
+	memset(vfe_dev_name, 0, sizeof(vfe_dev_name));
+	snprintf(vfe_dev_name, sizeof(vfe_dev_name),
+		"vfe%1u", vfe_hw_intf->hw_idx);
+
+	vfe_hw->soc_info.pdev = pdev;
+	vfe_hw->soc_info.dev = &pdev->dev;
+	vfe_hw->soc_info.dev_name = vfe_dev_name;
+	vfe_hw_intf->hw_priv = vfe_hw;
+	vfe_hw_intf->hw_ops.get_hw_caps = cam_vfe_get_hw_caps;
+	vfe_hw_intf->hw_ops.init = cam_vfe_init_hw;
+	vfe_hw_intf->hw_ops.deinit = cam_vfe_deinit_hw;
+	vfe_hw_intf->hw_ops.reset = cam_vfe_reset;
+	vfe_hw_intf->hw_ops.reserve = cam_vfe_reserve;
+	vfe_hw_intf->hw_ops.release = cam_vfe_release;
+	vfe_hw_intf->hw_ops.start = cam_vfe_start;
+	vfe_hw_intf->hw_ops.stop = cam_vfe_stop;
+	vfe_hw_intf->hw_ops.read = cam_vfe_read;
+	vfe_hw_intf->hw_ops.write = cam_vfe_write;
+	vfe_hw_intf->hw_ops.process_cmd = cam_vfe_process_cmd;
+	vfe_hw_intf->hw_type = CAM_ISP_HW_TYPE_VFE;
+
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, vfe_hw_intf);
+
+	vfe_hw->core_info = kzalloc(sizeof(struct cam_vfe_hw_core_info),
+		GFP_KERNEL);
+	if (!vfe_hw->core_info) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for core");
+		rc = -ENOMEM;
+		goto free_vfe_hw;
+	}
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	/* Per-variant register/layout data comes from the OF match table */
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_ISP, "Of_match Failed");
+		rc = -EINVAL;
+		goto free_core_info;
+	}
+	hw_info = (struct cam_vfe_hw_info *)match_dev->data;
+	core_info->vfe_hw_info = hw_info;
+
+	rc = cam_vfe_init_soc_resources(&vfe_hw->soc_info, cam_vfe_irq,
+		vfe_hw);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Failed to init soc rc=%d", rc);
+		goto free_core_info;
+	}
+
+	rc = cam_vfe_core_init(core_info, &vfe_hw->soc_info,
+		vfe_hw_intf, hw_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Failed to init core rc=%d", rc);
+		goto deinit_soc;
+	}
+
+	vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&vfe_hw->hw_mutex);
+	spin_lock_init(&vfe_hw->hw_lock);
+	init_completion(&vfe_hw->hw_complete);
+
+	if (vfe_hw_intf->hw_idx < CAM_VFE_HW_NUM_MAX)
+		cam_vfe_hw_list[vfe_hw_intf->hw_idx] = vfe_hw_intf;
+
+	/* Exercise one power cycle to verify the HW comes up */
+	cam_vfe_init_hw(vfe_hw, NULL, 0);
+	cam_vfe_deinit_hw(vfe_hw, NULL, 0);
+
+	CAM_DBG(CAM_ISP, "VFE%d probe successful", vfe_hw_intf->hw_idx);
+
+	return rc;
+
+deinit_soc:
+	if (cam_vfe_deinit_soc_resources(&vfe_hw->soc_info))
+		CAM_ERR(CAM_ISP, "Failed to deinit soc");
+free_core_info:
+	kfree(vfe_hw->core_info);
+free_vfe_hw:
+	kfree(vfe_hw);
+free_vfe_hw_intf:
+	kfree(vfe_hw_intf);
+end:
+	return rc;
+}
+
+/*
+ * cam_vfe_remove()
+ *
+ * @brief: Platform remove: unregister from the global table, tear down
+ *         core and SOC resources, and free everything probe allocated.
+ *
+ * @Return: 0 on success, negative errno on failure
+ */
+int cam_vfe_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *vfe_hw = NULL;
+	struct cam_hw_intf *vfe_hw_intf = NULL;
+	struct cam_vfe_hw_core_info *core_info = NULL;
+	int rc = 0;
+
+	vfe_hw_intf = platform_get_drvdata(pdev);
+	if (!vfe_hw_intf) {
+		CAM_ERR(CAM_ISP, "Error! No data in pdev");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+
+	if (vfe_hw_intf->hw_idx < CAM_VFE_HW_NUM_MAX)
+		cam_vfe_hw_list[vfe_hw_intf->hw_idx] = NULL;
+
+	vfe_hw = vfe_hw_intf->hw_priv;
+	if (!vfe_hw) {
+		CAM_ERR(CAM_ISP, "Error! HW data is NULL");
+		rc = -ENODEV;
+		goto free_vfe_hw_intf;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	if (!core_info) {
+		CAM_ERR(CAM_ISP, "Error! core data NULL");
+		rc = -EINVAL;
+		goto deinit_soc;
+	}
+
+	rc = cam_vfe_core_deinit(core_info, core_info->vfe_hw_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP, "Failed to deinit core rc=%d", rc);
+
+	kfree(vfe_hw->core_info);
+
+deinit_soc:
+	rc = cam_vfe_deinit_soc_resources(&vfe_hw->soc_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP, "Failed to deinit soc rc=%d", rc);
+
+	mutex_destroy(&vfe_hw->hw_mutex);
+	kfree(vfe_hw);
+
+	CAM_DBG(CAM_ISP, "VFE%d remove successful", vfe_hw_intf->hw_idx);
+
+free_vfe_hw_intf:
+	kfree(vfe_hw_intf);
+
+	return rc;
+}
+
+/*
+ * cam_vfe_hw_init()
+ *
+ * @brief: Look up the hw_intf of a probed VFE by index.
+ *
+ * @vfe_hw:  Out param; set to the registered intf or NULL
+ * @hw_idx:  VFE index to look up
+ *
+ * @Return: 0 on success, -ENODEV when not probed or index out of range
+ */
+int cam_vfe_hw_init(struct cam_hw_intf **vfe_hw, uint32_t hw_idx)
+{
+	int rc = 0;
+
+	/* Bounds-check before indexing, matching probe()/remove() */
+	if (hw_idx < CAM_VFE_HW_NUM_MAX && cam_vfe_hw_list[hw_idx]) {
+		*vfe_hw = cam_vfe_hw_list[hw_idx];
+		rc = 0;
+	} else {
+		*vfe_hw = NULL;
+		rc = -ENODEV;
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
new file mode 100644
index 000000000000..5b8a08bd6929
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_DEV_H_
+#define _CAM_VFE_DEV_H_
+
+#include <linux/platform_device.h>
+
+/*
+ * cam_vfe_probe()
+ *
+ * @brief: Driver probe function called on Boot
+ *
+ * @pdev: Platform Device pointer
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_probe(struct platform_device *pdev);
+
+/*
+ * cam_vfe_remove()
+ *
+ * @brief: Driver remove function
+ *
+ * @pdev: Platform Device pointer
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_remove(struct platform_device *pdev);
+
+#endif /* _CAM_VFE_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
new file mode 100644
index 000000000000..12b7a03c6276
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -0,0 +1,293 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "cam_cpas_api.h"
+#include "cam_vfe_soc.h"
+#include "cam_debug_util.h"
+
+static bool cam_vfe_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
+{
+ bool error_handled = false;
+
+ if (!irq_data)
+ return error_handled;
+
+ switch (irq_data->irq_type) {
+ case CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR:
+ case CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "IFE UBWC Encode error type=%d status=%x",
+ irq_data->irq_type,
+ irq_data->u.enc_err.encerr_status.value);
+ error_handled = true;
+ break;
+ default:
+ break;
+ }
+
+ return error_handled;
+}
+
+static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error! get DT properties failed rc=%d", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int cam_vfe_request_platform_resource(
+ struct cam_hw_soc_info *soc_info,
+ irq_handler_t vfe_irq_handler, void *irq_data)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_request_platform_resource(soc_info, vfe_irq_handler,
+ irq_data);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error! Request platform resource failed rc=%d", rc);
+
+ return rc;
+}
+
+static int cam_vfe_release_platform_resource(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_release_platform_resource(soc_info);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error! Release platform resource failed rc=%d", rc);
+
+ return rc;
+}
+
+int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t vfe_irq_handler, void *irq_data)
+{
+ int rc = 0;
+ struct cam_vfe_soc_private *soc_private;
+ struct cam_cpas_register_params cpas_register_param;
+
+ soc_private = kzalloc(sizeof(struct cam_vfe_soc_private),
+ GFP_KERNEL);
+ if (!soc_private) {
+ CAM_DBG(CAM_ISP, "Error! soc_private Alloc Failed");
+ return -ENOMEM;
+ }
+ soc_info->soc_private = soc_private;
+
+ rc = cam_vfe_get_dt_properties(soc_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP, "Error! Get DT properties failed rc=%d", rc);
+ goto free_soc_private;
+ }
+
+ rc = cam_soc_util_get_option_clk_by_name(soc_info,
+ CAM_VFE_DSP_CLK_NAME, &soc_private->dsp_clk,
+ &soc_private->dsp_clk_index, &soc_private->dsp_clk_rate);
+ if (rc)
+ CAM_WARN(CAM_ISP, "option clk get failed");
+
+ rc = cam_vfe_request_platform_resource(soc_info, vfe_irq_handler,
+ irq_data);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP,
+ "Error! Request platform resources failed rc=%d", rc);
+ goto free_soc_private;
+ }
+
+ memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+ strlcpy(cpas_register_param.identifier, "ife",
+ CAM_HW_IDENTIFIER_LENGTH);
+ cpas_register_param.cell_index = soc_info->index;
+ cpas_register_param.dev = soc_info->dev;
+ cpas_register_param.cam_cpas_client_cb = cam_vfe_cpas_cb;
+ cpas_register_param.userdata = soc_info;
+ rc = cam_cpas_register_client(&cpas_register_param);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
+ goto release_soc;
+ } else {
+ soc_private->cpas_handle = cpas_register_param.client_handle;
+ }
+
+ return rc;
+
+release_soc:
+ cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+ kfree(soc_private);
+
+ return rc;
+}
+
+int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+ struct cam_vfe_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error! soc_info NULL");
+ return -ENODEV;
+ }
+
+ soc_private = soc_info->soc_private;
+ if (!soc_private) {
+ CAM_ERR(CAM_ISP, "Error! soc_private NULL");
+ return -ENODEV;
+ }
+
+ rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+ rc = cam_vfe_release_platform_resource(soc_info);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error! Release platform resources failed rc=%d", rc);
+
+ rc = cam_soc_util_clk_put(&soc_private->dsp_clk);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error Put dsp clk failed rc=%d", rc);
+
+ kfree(soc_private);
+
+ return rc;
+}
+
+int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+ struct cam_vfe_soc_private *soc_private;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error! Invalid params");
+ rc = -EINVAL;
+ goto end;
+ }
+ soc_private = soc_info->soc_private;
+
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+
+ axi_vote.compressed_bw = 10640000000L;
+ axi_vote.uncompressed_bw = 10640000000L;
+
+ rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error! CPAS start failed rc=%d", rc);
+ rc = -EFAULT;
+ goto end;
+ }
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, true);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error! enable platform failed rc=%d", rc);
+ goto stop_cpas;
+ }
+
+ return rc;
+
+stop_cpas:
+ cam_cpas_stop(soc_private->cpas_handle);
+end:
+ return rc;
+}
+
+int cam_vfe_soc_enable_clk(struct cam_hw_soc_info *soc_info,
+ const char *clk_name)
+{
+ int rc = 0;
+ struct cam_vfe_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error Invalid params");
+ rc = -EINVAL;
+ return rc;
+ }
+ soc_private = soc_info->soc_private;
+
+ if (strcmp(clk_name, CAM_VFE_DSP_CLK_NAME) == 0) {
+ rc = cam_soc_util_clk_enable(soc_private->dsp_clk,
+ CAM_VFE_DSP_CLK_NAME, soc_private->dsp_clk_rate);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error enable dsp clk failed rc=%d", rc);
+ }
+
+ return rc;
+}
+
+int cam_vfe_soc_disable_clk(struct cam_hw_soc_info *soc_info,
+ const char *clk_name)
+{
+ int rc = 0;
+ struct cam_vfe_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error Invalid params");
+ rc = -EINVAL;
+ return rc;
+ }
+ soc_private = soc_info->soc_private;
+
+ if (strcmp(clk_name, CAM_VFE_DSP_CLK_NAME) == 0) {
+ rc = cam_soc_util_clk_disable(soc_private->dsp_clk,
+ CAM_VFE_DSP_CLK_NAME);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+				"Error disable dsp clk failed rc=%d", rc);
+ }
+
+ return rc;
+}
+
+
+int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+ struct cam_vfe_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error! Invalid params");
+ rc = -EINVAL;
+ return rc;
+ }
+ soc_private = soc_info->soc_private;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Disable platform failed rc=%d", rc);
+ return rc;
+ }
+
+ rc = cam_cpas_stop(soc_private->cpas_handle);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error! CPAS stop failed rc=%d", rc);
+ return rc;
+ }
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
new file mode 100644
index 000000000000..3f862e9249f8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
@@ -0,0 +1,116 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_SOC_H_
+#define _CAM_VFE_SOC_H_
+
+#include "cam_soc_util.h"
+#include "cam_isp_hw.h"
+
+#define CAM_VFE_DSP_CLK_NAME "ife_dsp_clk"
+
+/*
+ * struct cam_vfe_soc_private:
+ *
+ * @Brief: Private SOC data specific to VFE HW Driver
+ *
+ * @cpas_handle: Handle returned on registering with CPAS driver.
+ * This handle is used for all further interface
+ * with CPAS.
+ */
+struct cam_vfe_soc_private {
+ uint32_t cpas_handle;
+ struct clk *dsp_clk;
+ int32_t dsp_clk_index;
+ int32_t dsp_clk_rate;
+};
+
+/*
+ * cam_vfe_init_soc_resources()
+ *
+ * @Brief: Initialize SOC resources including private data
+ *
+ * @soc_info: Device soc information
+ * @handler: Irq handler function pointer
+ * @irq_data: Irq handler function Callback data
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t vfe_irq_handler, void *irq_data);
+
+/*
+ * cam_vfe_deinit_soc_resources()
+ *
+ * @Brief: Deinitialize SOC resources including private data
+ *
+ * @soc_info: Device soc information
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
+ * cam_vfe_enable_soc_resources()
+ *
+ * @brief: Enable regulator, irq resources, start CPAS
+ *
+ * @soc_info: Device soc information
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
+ * cam_vfe_disable_soc_resources()
+ *
+ * @brief: Disable regulator, irq resources, stop CPAS
+ *
+ * @soc_info: Device soc information
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
+ * cam_vfe_soc_enable_clk()
+ *
+ * @brief: Enable clock with given name
+ *
+ * @soc_info: Device soc information
+ * @clk_name: Name of clock to enable
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_soc_enable_clk(struct cam_hw_soc_info *soc_info,
+ const char *clk_name);
+
+/*
+ * cam_vfe_soc_disable_clk()
+ *
+ * @brief: Disable clock with given name
+ *
+ * @soc_info: Device soc information
+ * @clk_name:            Name of clock to disable
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_soc_disable_clk(struct cam_hw_soc_info *soc_info,
+ const char *clk_name);
+
+#endif /* _CAM_VFE_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/Makefile
new file mode 100644
index 000000000000..deeae35cef79
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe170.o cam_vfe_lite170.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
new file mode 100644
index 000000000000..d002f84015de
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
@@ -0,0 +1,52 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_vfe170.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_dev.h"
+
+static const struct of_device_id cam_vfe170_dt_match[] = {
+ {
+ .compatible = "qcom,vfe170",
+ .data = &cam_vfe170_hw_info,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cam_vfe170_dt_match);
+
+static struct platform_driver cam_vfe170_driver = {
+ .probe = cam_vfe_probe,
+ .remove = cam_vfe_remove,
+ .driver = {
+ .name = "cam_vfe170",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_vfe170_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_vfe170_init_module(void)
+{
+ return platform_driver_register(&cam_vfe170_driver);
+}
+
+static void __exit cam_vfe170_exit_module(void)
+{
+ platform_driver_unregister(&cam_vfe170_driver);
+}
+
+module_init(cam_vfe170_init_module);
+module_exit(cam_vfe170_exit_module);
+MODULE_DESCRIPTION("CAM VFE170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
new file mode 100644
index 000000000000..d1284d9f23d2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -0,0 +1,842 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE170_H_
+#define _CAM_VFE170_H_
+
+#include "cam_vfe_camif_ver2.h"
+#include "cam_vfe_bus_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_core.h"
+
+static struct cam_irq_register_set vfe170_top_irq_reg_set[2] = {
+ {
+ .mask_reg_offset = 0x0000005C,
+ .clear_reg_offset = 0x00000064,
+ .status_reg_offset = 0x0000006C,
+ },
+ {
+ .mask_reg_offset = 0x00000060,
+ .clear_reg_offset = 0x00000068,
+ .status_reg_offset = 0x00000070,
+ },
+};
+
+static struct cam_irq_controller_reg_info vfe170_top_irq_reg_info = {
+ .num_registers = 2,
+ .irq_reg_set = vfe170_top_irq_reg_set,
+ .global_clear_offset = 0x00000058,
+ .global_clear_bitmask = 0x00000001,
+};
+
+static struct cam_vfe_camif_ver2_reg vfe170_camif_reg = {
+ .camif_cmd = 0x00000478,
+ .camif_config = 0x0000047C,
+ .line_skip_pattern = 0x00000488,
+ .pixel_skip_pattern = 0x0000048C,
+ .skip_period = 0x00000490,
+ .irq_subsample_pattern = 0x0000049C,
+ .epoch_irq = 0x000004A0,
+ .raw_crop_width_cfg = 0x00000CE4,
+ .raw_crop_height_cfg = 0x00000CE8,
+ .reg_update_cmd = 0x000004AC,
+ .vfe_diag_config = 0x00000C48,
+ .vfe_diag_sensor_status = 0x00000C4C,
+};
+
+static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = {
+ .raw_crop_first_pixel_shift = 16,
+ .raw_crop_first_pixel_mask = 0xFFFF,
+ .raw_crop_last_pixel_shift = 0x0,
+ .raw_crop_last_pixel_mask = 0x3FFF,
+ .raw_crop_first_line_shift = 16,
+ .raw_crop_first_line_mask = 0xFFFF,
+ .raw_crop_last_line_shift = 0,
+ .raw_crop_last_line_mask = 0x3FFF,
+ .input_mux_sel_shift = 5,
+ .input_mux_sel_mask = 0x3,
+ .extern_reg_update_shift = 4,
+ .extern_reg_update_mask = 1,
+ .pixel_pattern_shift = 0,
+ .pixel_pattern_mask = 0x7,
+ .dsp_mode_shift = 23,
+ .dsp_mode_mask = 0x1,
+ .dsp_en_shift = 3,
+ .dsp_en_mask = 0x1,
+ .reg_update_cmd_data = 0x1,
+ .epoch_line_cfg = 0x00140014,
+ .sof_irq_mask = 0x00000001,
+ .epoch0_irq_mask = 0x00000004,
+ .reg_update_irq_mask = 0x00000010,
+ .eof_irq_mask = 0x00000002,
+ .error_irq_mask0 = 0x0003FC00,
+ .error_irq_mask1 = 0x0FFF7E80,
+ .enable_diagnostic_hw = 0x1,
+};
+
+struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = {
+ .reset = 0x0000001C,
+ .cgc_ovd = 0x0000002C,
+ .enable = 0x00000040,
+};
+
+struct cam_vfe_top_ver2_reg_offset_module_ctrl stats_170_reg = {
+ .reset = 0x00000020,
+ .cgc_ovd = 0x00000030,
+ .enable = 0x00000044,
+};
+
+struct cam_vfe_top_ver2_reg_offset_module_ctrl color_170_reg = {
+ .reset = 0x00000024,
+ .cgc_ovd = 0x00000034,
+ .enable = 0x00000048,
+};
+
+struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_170_reg = {
+ .reset = 0x00000028,
+ .cgc_ovd = 0x00000038,
+ .enable = 0x0000004C,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_common vfe170_top_common_reg = {
+ .hw_version = 0x00000000,
+ .hw_capability = 0x00000004,
+ .lens_feature = 0x00000008,
+ .stats_feature = 0x0000000C,
+ .color_feature = 0x00000010,
+ .zoom_feature = 0x00000014,
+ .global_reset_cmd = 0x00000018,
+ .module_ctrl = {
+ &lens_170_reg,
+ &stats_170_reg,
+ &color_170_reg,
+ &zoom_170_reg,
+ },
+ .bus_cgc_ovd = 0x0000003C,
+ .core_cfg = 0x00000050,
+ .three_D_cfg = 0x00000054,
+ .violation_status = 0x0000007C,
+ .reg_update_cmd = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_ver2_reg vfe170_rdi_reg = {
+ .reg_update_cmd = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_0_data = {
+ .reg_update_cmd_data = 0x2,
+ .sof_irq_mask = 0x8000000,
+ .reg_update_irq_mask = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_1_data = {
+ .reg_update_cmd_data = 0x4,
+ .sof_irq_mask = 0x10000000,
+ .reg_update_irq_mask = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_2_data = {
+ .reg_update_cmd_data = 0x8,
+ .sof_irq_mask = 0x20000000,
+ .reg_update_irq_mask = 0x80,
+};
+
+static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = {
+ .common_reg = &vfe170_top_common_reg,
+ .camif_hw_info = {
+ .common_reg = &vfe170_top_common_reg,
+ .camif_reg = &vfe170_camif_reg,
+ .reg_data = &vfe_170_camif_reg_data,
+ },
+ .rdi_hw_info = {
+ .common_reg = &vfe170_top_common_reg,
+ .rdi_reg = &vfe170_rdi_reg,
+ .reg_data = {
+ &vfe_170_rdi_0_data,
+ &vfe_170_rdi_1_data,
+ &vfe_170_rdi_2_data,
+ NULL,
+ },
+ },
+ .mux_type = {
+ CAM_VFE_CAMIF_VER_2_0,
+ CAM_VFE_RDI_VER_1_0,
+ CAM_VFE_RDI_VER_1_0,
+ CAM_VFE_RDI_VER_1_0,
+ },
+};
+
+static struct cam_irq_register_set vfe170_bus_irq_reg[3] = {
+ {
+ .mask_reg_offset = 0x00002044,
+ .clear_reg_offset = 0x00002050,
+ .status_reg_offset = 0x0000205C,
+ },
+ {
+ .mask_reg_offset = 0x00002048,
+ .clear_reg_offset = 0x00002054,
+ .status_reg_offset = 0x00002060,
+ },
+ {
+ .mask_reg_offset = 0x0000204C,
+ .clear_reg_offset = 0x00002058,
+ .status_reg_offset = 0x00002064,
+ },
+};
+
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_3 = {
+ .tile_cfg = 0x0000252C,
+ .h_init = 0x00002530,
+ .v_init = 0x00002534,
+ .meta_addr = 0x00002538,
+ .meta_offset = 0x0000253C,
+ .meta_stride = 0x00002540,
+ .mode_cfg = 0x00002544,
+ .bw_limit = 0x000025A0,
+};
+
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_4 = {
+ .tile_cfg = 0x0000262C,
+ .h_init = 0x00002630,
+ .v_init = 0x00002634,
+ .meta_addr = 0x00002638,
+ .meta_offset = 0x0000263C,
+ .meta_stride = 0x00002640,
+ .mode_cfg = 0x00002644,
+ .bw_limit = 0x000026A0,
+};
+
+static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = {
+ .common_reg = {
+ .hw_version = 0x00002000,
+ .hw_capability = 0x00002004,
+ .sw_reset = 0x00002008,
+ .cgc_ovd = 0x0000200C,
+ .pwr_iso_cfg = 0x000020CC,
+ .dual_master_comp_cfg = 0x00002028,
+ .irq_reg_info = {
+ .num_registers = 3,
+ .irq_reg_set = vfe170_bus_irq_reg,
+ .global_clear_offset = 0x00002068,
+ .global_clear_bitmask = 0x00000001,
+ },
+ .comp_error_status = 0x0000206C,
+ .comp_ovrwr_status = 0x00002070,
+ .dual_comp_error_status = 0x00002074,
+ .dual_comp_ovrwr_status = 0x00002078,
+ .addr_sync_cfg = 0x0000207C,
+ .addr_sync_frame_hdr = 0x00002080,
+ .addr_sync_no_sync = 0x00002084,
+ .debug_status_cfg = 0x0000226C,
+ .debug_status_0 = 0x00002270,
+ },
+ .num_client = 20,
+ .bus_client_reg = {
+ /* BUS Client 0 */
+ {
+ .status0 = 0x00002200,
+ .status1 = 0x00002204,
+ .cfg = 0x00002208,
+ .header_addr = 0x0000220C,
+ .header_cfg = 0x00002210,
+ .image_addr = 0x00002214,
+ .image_addr_offset = 0x00002218,
+ .buffer_width_cfg = 0x0000221C,
+ .buffer_height_cfg = 0x00002220,
+ .packer_cfg = 0x00002224,
+ .stride = 0x00002228,
+ .irq_subsample_period = 0x00002248,
+ .irq_subsample_pattern = 0x0000224C,
+ .framedrop_period = 0x00002250,
+ .framedrop_pattern = 0x00002254,
+ .frame_inc = 0x00002258,
+ .burst_limit = 0x0000225C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 1 */
+ {
+ .status0 = 0x00002300,
+ .status1 = 0x00002304,
+ .cfg = 0x00002308,
+ .header_addr = 0x0000230C,
+ .header_cfg = 0x00002310,
+ .image_addr = 0x00002314,
+ .image_addr_offset = 0x00002318,
+ .buffer_width_cfg = 0x0000231C,
+ .buffer_height_cfg = 0x00002320,
+ .packer_cfg = 0x00002324,
+ .stride = 0x00002328,
+ .irq_subsample_period = 0x00002348,
+ .irq_subsample_pattern = 0x0000234C,
+ .framedrop_period = 0x00002350,
+ .framedrop_pattern = 0x00002354,
+ .frame_inc = 0x00002358,
+ .burst_limit = 0x0000235C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 2 */
+ {
+ .status0 = 0x00002400,
+ .status1 = 0x00002404,
+ .cfg = 0x00002408,
+ .header_addr = 0x0000240C,
+ .header_cfg = 0x00002410,
+ .image_addr = 0x00002414,
+ .image_addr_offset = 0x00002418,
+ .buffer_width_cfg = 0x0000241C,
+ .buffer_height_cfg = 0x00002420,
+ .packer_cfg = 0x00002424,
+ .stride = 0x00002428,
+ .irq_subsample_period = 0x00002448,
+ .irq_subsample_pattern = 0x0000244C,
+ .framedrop_period = 0x00002450,
+ .framedrop_pattern = 0x00002454,
+ .frame_inc = 0x00002458,
+ .burst_limit = 0x0000245C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 3 */
+ {
+ .status0 = 0x00002500,
+ .status1 = 0x00002504,
+ .cfg = 0x00002508,
+ .header_addr = 0x0000250C,
+ .header_cfg = 0x00002510,
+ .image_addr = 0x00002514,
+ .image_addr_offset = 0x00002518,
+ .buffer_width_cfg = 0x0000251C,
+ .buffer_height_cfg = 0x00002520,
+ .packer_cfg = 0x00002524,
+ .stride = 0x00002528,
+ .irq_subsample_period = 0x00002548,
+ .irq_subsample_pattern = 0x0000254C,
+ .framedrop_period = 0x00002550,
+ .framedrop_pattern = 0x00002554,
+ .frame_inc = 0x00002558,
+ .burst_limit = 0x0000255C,
+ .ubwc_regs = &ubwc_regs_client_3,
+ },
+ /* BUS Client 4 */
+ {
+ .status0 = 0x00002600,
+ .status1 = 0x00002604,
+ .cfg = 0x00002608,
+ .header_addr = 0x0000260C,
+ .header_cfg = 0x00002610,
+ .image_addr = 0x00002614,
+ .image_addr_offset = 0x00002618,
+ .buffer_width_cfg = 0x0000261C,
+ .buffer_height_cfg = 0x00002620,
+ .packer_cfg = 0x00002624,
+ .stride = 0x00002628,
+ .irq_subsample_period = 0x00002648,
+ .irq_subsample_pattern = 0x0000264C,
+ .framedrop_period = 0x00002650,
+ .framedrop_pattern = 0x00002654,
+ .frame_inc = 0x00002658,
+ .burst_limit = 0x0000265C,
+ .ubwc_regs = &ubwc_regs_client_4,
+ },
+ /* BUS Client 5 */
+ {
+ .status0 = 0x00002700,
+ .status1 = 0x00002704,
+ .cfg = 0x00002708,
+ .header_addr = 0x0000270C,
+ .header_cfg = 0x00002710,
+ .image_addr = 0x00002714,
+ .image_addr_offset = 0x00002718,
+ .buffer_width_cfg = 0x0000271C,
+ .buffer_height_cfg = 0x00002720,
+ .packer_cfg = 0x00002724,
+ .stride = 0x00002728,
+ .irq_subsample_period = 0x00002748,
+ .irq_subsample_pattern = 0x0000274C,
+ .framedrop_period = 0x00002750,
+ .framedrop_pattern = 0x00002754,
+ .frame_inc = 0x00002758,
+ .burst_limit = 0x0000275C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 6 */
+ {
+ .status0 = 0x00002800,
+ .status1 = 0x00002804,
+ .cfg = 0x00002808,
+ .header_addr = 0x0000280C,
+ .header_cfg = 0x00002810,
+ .image_addr = 0x00002814,
+ .image_addr_offset = 0x00002818,
+ .buffer_width_cfg = 0x0000281C,
+ .buffer_height_cfg = 0x00002820,
+ .packer_cfg = 0x00002824,
+ .stride = 0x00002828,
+ .irq_subsample_period = 0x00002848,
+ .irq_subsample_pattern = 0x0000284C,
+ .framedrop_period = 0x00002850,
+ .framedrop_pattern = 0x00002854,
+ .frame_inc = 0x00002858,
+ .burst_limit = 0x0000285C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 7 */
+ {
+ .status0 = 0x00002900,
+ .status1 = 0x00002904,
+ .cfg = 0x00002908,
+ .header_addr = 0x0000290C,
+ .header_cfg = 0x00002910,
+ .image_addr = 0x00002914,
+ .image_addr_offset = 0x00002918,
+ .buffer_width_cfg = 0x0000291C,
+ .buffer_height_cfg = 0x00002920,
+ .packer_cfg = 0x00002924,
+ .stride = 0x00002928,
+ .irq_subsample_period = 0x00002948,
+ .irq_subsample_pattern = 0x0000294C,
+ .framedrop_period = 0x00002950,
+ .framedrop_pattern = 0x00002954,
+ .frame_inc = 0x00002958,
+ .burst_limit = 0x0000295C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 8 */
+ {
+ .status0 = 0x00002A00,
+ .status1 = 0x00002A04,
+ .cfg = 0x00002A08,
+ .header_addr = 0x00002A0C,
+ .header_cfg = 0x00002A10,
+ .image_addr = 0x00002A14,
+ .image_addr_offset = 0x00002A18,
+ .buffer_width_cfg = 0x00002A1C,
+ .buffer_height_cfg = 0x00002A20,
+ .packer_cfg = 0x00002A24,
+ .stride = 0x00002A28,
+ .irq_subsample_period = 0x00002A48,
+ .irq_subsample_pattern = 0x00002A4C,
+ .framedrop_period = 0x00002A50,
+ .framedrop_pattern = 0x00002A54,
+ .frame_inc = 0x00002A58,
+ .burst_limit = 0x00002A5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 9 */
+ {
+ .status0 = 0x00002B00,
+ .status1 = 0x00002B04,
+ .cfg = 0x00002B08,
+ .header_addr = 0x00002B0C,
+ .header_cfg = 0x00002B10,
+ .image_addr = 0x00002B14,
+ .image_addr_offset = 0x00002B18,
+ .buffer_width_cfg = 0x00002B1C,
+ .buffer_height_cfg = 0x00002B20,
+ .packer_cfg = 0x00002B24,
+ .stride = 0x00002B28,
+ .irq_subsample_period = 0x00002B48,
+ .irq_subsample_pattern = 0x00002B4C,
+ .framedrop_period = 0x00002B50,
+ .framedrop_pattern = 0x00002B54,
+ .frame_inc = 0x00002B58,
+ .burst_limit = 0x00002B5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 10 */
+ {
+ .status0 = 0x00002C00,
+ .status1 = 0x00002C04,
+ .cfg = 0x00002C08,
+ .header_addr = 0x00002C0C,
+ .header_cfg = 0x00002C10,
+ .image_addr = 0x00002C14,
+ .image_addr_offset = 0x00002C18,
+ .buffer_width_cfg = 0x00002C1C,
+ .buffer_height_cfg = 0x00002C20,
+ .packer_cfg = 0x00002C24,
+ .stride = 0x00002C28,
+ .irq_subsample_period = 0x00002C48,
+ .irq_subsample_pattern = 0x00002C4C,
+ .framedrop_period = 0x00002C50,
+ .framedrop_pattern = 0x00002C54,
+ .frame_inc = 0x00002C58,
+ .burst_limit = 0x00002C5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 11 */
+ {
+ .status0 = 0x00002D00,
+ .status1 = 0x00002D04,
+ .cfg = 0x00002D08,
+ .header_addr = 0x00002D0C,
+ .header_cfg = 0x00002D10,
+ .image_addr = 0x00002D14,
+ .image_addr_offset = 0x00002D18,
+ .buffer_width_cfg = 0x00002D1C,
+ .buffer_height_cfg = 0x00002D20,
+ .packer_cfg = 0x00002D24,
+ .stride = 0x00002D28,
+ .irq_subsample_period = 0x00002D48,
+ .irq_subsample_pattern = 0x00002D4C,
+ .framedrop_period = 0x00002D50,
+ .framedrop_pattern = 0x00002D54,
+ .frame_inc = 0x00002D58,
+ .burst_limit = 0x00002D5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 12 */
+ {
+ .status0 = 0x00002E00,
+ .status1 = 0x00002E04,
+ .cfg = 0x00002E08,
+ .header_addr = 0x00002E0C,
+ .header_cfg = 0x00002E10,
+ .image_addr = 0x00002E14,
+ .image_addr_offset = 0x00002E18,
+ .buffer_width_cfg = 0x00002E1C,
+ .buffer_height_cfg = 0x00002E20,
+ .packer_cfg = 0x00002E24,
+ .stride = 0x00002E28,
+ .irq_subsample_period = 0x00002E48,
+ .irq_subsample_pattern = 0x00002E4C,
+ .framedrop_period = 0x00002E50,
+ .framedrop_pattern = 0x00002E54,
+ .frame_inc = 0x00002E58,
+ .burst_limit = 0x00002E5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 13 */
+ {
+ .status0 = 0x00002F00,
+ .status1 = 0x00002F04,
+ .cfg = 0x00002F08,
+ .header_addr = 0x00002F0C,
+ .header_cfg = 0x00002F10,
+ .image_addr = 0x00002F14,
+ .image_addr_offset = 0x00002F18,
+ .buffer_width_cfg = 0x00002F1C,
+ .buffer_height_cfg = 0x00002F20,
+ .packer_cfg = 0x00002F24,
+ .stride = 0x00002F28,
+ .irq_subsample_period = 0x00002F48,
+ .irq_subsample_pattern = 0x00002F4C,
+ .framedrop_period = 0x00002F50,
+ .framedrop_pattern = 0x00002F54,
+ .frame_inc = 0x00002F58,
+ .burst_limit = 0x00002F5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 14 */
+ {
+ .status0 = 0x00003000,
+ .status1 = 0x00003004,
+ .cfg = 0x00003008,
+ .header_addr = 0x0000300C,
+ .header_cfg = 0x00003010,
+ .image_addr = 0x00003014,
+ .image_addr_offset = 0x00003018,
+ .buffer_width_cfg = 0x0000301C,
+ .buffer_height_cfg = 0x00003020,
+ .packer_cfg = 0x00003024,
+ .stride = 0x00003028,
+ .irq_subsample_period = 0x00003048,
+ .irq_subsample_pattern = 0x0000304C,
+ .framedrop_period = 0x00003050,
+ .framedrop_pattern = 0x00003054,
+ .frame_inc = 0x00003058,
+ .burst_limit = 0x0000305C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 15 */
+ {
+ .status0 = 0x00003100,
+ .status1 = 0x00003104,
+ .cfg = 0x00003108,
+ .header_addr = 0x0000310C,
+ .header_cfg = 0x00003110,
+ .image_addr = 0x00003114,
+ .image_addr_offset = 0x00003118,
+ .buffer_width_cfg = 0x0000311C,
+ .buffer_height_cfg = 0x00003120,
+ .packer_cfg = 0x00003124,
+ .stride = 0x00003128,
+ .irq_subsample_period = 0x00003148,
+ .irq_subsample_pattern = 0x0000314C,
+ .framedrop_period = 0x00003150,
+ .framedrop_pattern = 0x00003154,
+ .frame_inc = 0x00003158,
+ .burst_limit = 0x0000315C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 16 */
+ {
+ .status0 = 0x00003200,
+ .status1 = 0x00003204,
+ .cfg = 0x00003208,
+ .header_addr = 0x0000320C,
+ .header_cfg = 0x00003210,
+ .image_addr = 0x00003214,
+ .image_addr_offset = 0x00003218,
+ .buffer_width_cfg = 0x0000321C,
+ .buffer_height_cfg = 0x00003220,
+ .packer_cfg = 0x00003224,
+ .stride = 0x00003228,
+ .irq_subsample_period = 0x00003248,
+ .irq_subsample_pattern = 0x0000324C,
+ .framedrop_period = 0x00003250,
+ .framedrop_pattern = 0x00003254,
+ .frame_inc = 0x00003258,
+ .burst_limit = 0x0000325C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 17 */
+ {
+ .status0 = 0x00003300,
+ .status1 = 0x00003304,
+ .cfg = 0x00003308,
+ .header_addr = 0x0000330C,
+ .header_cfg = 0x00003310,
+ .image_addr = 0x00003314,
+ .image_addr_offset = 0x00003318,
+ .buffer_width_cfg = 0x0000331C,
+ .buffer_height_cfg = 0x00003320,
+ .packer_cfg = 0x00003324,
+ .stride = 0x00003328,
+ .irq_subsample_period = 0x00003348,
+ .irq_subsample_pattern = 0x0000334C,
+ .framedrop_period = 0x00003350,
+ .framedrop_pattern = 0x00003354,
+ .frame_inc = 0x00003358,
+ .burst_limit = 0x0000335C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 18 */
+ {
+ .status0 = 0x00003400,
+ .status1 = 0x00003404,
+ .cfg = 0x00003408,
+ .header_addr = 0x0000340C,
+ .header_cfg = 0x00003410,
+ .image_addr = 0x00003414,
+ .image_addr_offset = 0x00003418,
+ .buffer_width_cfg = 0x0000341C,
+ .buffer_height_cfg = 0x00003420,
+ .packer_cfg = 0x00003424,
+ .stride = 0x00003428,
+ .irq_subsample_period = 0x00003448,
+ .irq_subsample_pattern = 0x0000344C,
+ .framedrop_period = 0x00003450,
+ .framedrop_pattern = 0x00003454,
+ .frame_inc = 0x00003458,
+ .burst_limit = 0x0000345C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 19 */
+ {
+ .status0 = 0x00003500,
+ .status1 = 0x00003504,
+ .cfg = 0x00003508,
+ .header_addr = 0x0000350C,
+ .header_cfg = 0x00003510,
+ .image_addr = 0x00003514,
+ .image_addr_offset = 0x00003518,
+ .buffer_width_cfg = 0x0000351C,
+ .buffer_height_cfg = 0x00003520,
+ .packer_cfg = 0x00003524,
+ .stride = 0x00003528,
+ .irq_subsample_period = 0x00003548,
+ .irq_subsample_pattern = 0x0000354C,
+ .framedrop_period = 0x00003550,
+ .framedrop_pattern = 0x00003554,
+ .frame_inc = 0x00003558,
+ .burst_limit = 0x0000355C,
+ .ubwc_regs = NULL,
+ },
+ },
+ .comp_grp_reg = {
+ /* CAM_VFE_BUS_VER2_COMP_GRP_0 */
+ {
+ .comp_mask = 0x00002010,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_1 */
+ {
+ .comp_mask = 0x00002014,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_2 */
+ {
+ .comp_mask = 0x00002018,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_3 */
+ {
+ .comp_mask = 0x0000201C,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_4 */
+ {
+ .comp_mask = 0x00002020,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_5 */
+ {
+ .comp_mask = 0x00002024,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */
+ {
+ .comp_mask = 0x0000202C,
+ .addr_sync_mask = 0x00002088,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */
+ {
+ .comp_mask = 0x00002030,
+ .addr_sync_mask = 0x0000208C,
+
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */
+ {
+ .comp_mask = 0x00002034,
+ .addr_sync_mask = 0x00002090,
+
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */
+ {
+ .comp_mask = 0x00002038,
+ .addr_sync_mask = 0x00002094,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */
+ {
+ .comp_mask = 0x0000203C,
+ .addr_sync_mask = 0x00002098,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */
+ {
+ .comp_mask = 0x00002040,
+ .addr_sync_mask = 0x0000209C,
+ },
+ },
+ .num_out = 18,
+ .vfe_out_hw_info = {
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FULL,
+ .max_width = 4096,
+ .max_height = 4096,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS4,
+ .max_width = 1920,
+ .max_height = 1080,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS16,
+ .max_width = 1920,
+ .max_height = 1080,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FD,
+ .max_width = 1920,
+ .max_height = 1080,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_PDAF,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+ .max_width = 1920,
+ .max_height = 1080,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ },
+};
+
+struct cam_vfe_hw_info cam_vfe170_hw_info = {
+ .irq_reg_info = &vfe170_top_irq_reg_info,
+
+ .bus_version = CAM_VFE_BUS_VER_2_0,
+ .bus_hw_info = &vfe170_bus_hw_info,
+
+ .top_version = CAM_VFE_TOP_VER_2_0,
+ .top_hw_info = &vfe170_top_hw_info,
+
+ .camif_version = CAM_VFE_CAMIF_VER_2_0,
+ .camif_reg = &vfe170_camif_reg,
+
+};
+
+#endif /* _CAM_VFE170_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe_lite170.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe_lite170.c
new file mode 100644
index 000000000000..ab692cf095e4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe_lite170.c
@@ -0,0 +1,52 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_vfe_lite170.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_dev.h"
+
+static const struct of_device_id cam_vfe170_dt_match[] = {
+ {
+ .compatible = "qcom,vfe-lite170",
+ .data = &cam_vfe_lite170_hw_info,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cam_vfe170_dt_match);
+
+static struct platform_driver cam_vfe170_driver = {
+ .probe = cam_vfe_probe,
+ .remove = cam_vfe_remove,
+ .driver = {
+ .name = "cam_vfe_lite170",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_vfe170_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_vfe170_init_module(void)
+{
+ return platform_driver_register(&cam_vfe170_driver);
+}
+
+static void __exit cam_vfe170_exit_module(void)
+{
+ platform_driver_unregister(&cam_vfe170_driver);
+}
+
+module_init(cam_vfe170_init_module);
+module_exit(cam_vfe170_exit_module);
+MODULE_DESCRIPTION("CAM VFE170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe_lite170.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe_lite170.h
new file mode 100644
index 000000000000..4b2e0963a599
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe_lite170.h
@@ -0,0 +1,336 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_LITE170_H_
+#define _CAM_VFE_LITE170_H_
+
+#include "cam_vfe_bus_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_core.h"
+
+static struct cam_irq_register_set vfe170_top_irq_reg_set[2] = {
+ {
+ .mask_reg_offset = 0x0000005C,
+ .clear_reg_offset = 0x00000064,
+ .status_reg_offset = 0x0000006C,
+ },
+ {
+ .mask_reg_offset = 0x00000060,
+ .clear_reg_offset = 0x00000068,
+ .status_reg_offset = 0x00000070,
+ },
+};
+
+static struct cam_irq_controller_reg_info vfe170_top_irq_reg_info = {
+ .num_registers = 2,
+ .irq_reg_set = vfe170_top_irq_reg_set,
+ .global_clear_offset = 0x00000058,
+ .global_clear_bitmask = 0x00000001,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_common vfe170_top_common_reg = {
+ .hw_version = 0x00000000,
+ .hw_capability = 0x00000004,
+ .lens_feature = 0x00000008,
+ .stats_feature = 0x0000000C,
+ .color_feature = 0x00000010,
+ .zoom_feature = 0x00000014,
+ .global_reset_cmd = 0x00000018,
+ .module_ctrl = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+ .bus_cgc_ovd = 0x0000003C,
+ .core_cfg = 0x00000000,
+ .three_D_cfg = 0x00000000,
+ .violation_status = 0x0000007C,
+ .reg_update_cmd = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_ver2_reg vfe170_rdi_reg = {
+ .reg_update_cmd = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_reg_data vfe170_rdi_0_data = {
+ .reg_update_cmd_data = 0x2,
+ .sof_irq_mask = 0x8000000,
+ .reg_update_irq_mask = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data vfe170_rdi_1_data = {
+ .reg_update_cmd_data = 0x4,
+ .sof_irq_mask = 0x10000000,
+ .reg_update_irq_mask = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data vfe170_rdi_2_data = {
+ .reg_update_cmd_data = 0x8,
+ .sof_irq_mask = 0x20000000,
+ .reg_update_irq_mask = 0x80,
+};
+
+static struct cam_vfe_rdi_reg_data vfe170_rdi_3_data = {
+ .reg_update_cmd_data = 0x10,
+ .sof_irq_mask = 0x40000000,
+ .reg_update_irq_mask = 0x100,
+};
+
+static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = {
+ .common_reg = &vfe170_top_common_reg,
+ .camif_hw_info = {
+ .common_reg = NULL,
+ .camif_reg = NULL,
+ .reg_data = NULL,
+ },
+ .rdi_hw_info = {
+ .common_reg = &vfe170_top_common_reg,
+ .rdi_reg = &vfe170_rdi_reg,
+ .reg_data = {
+ &vfe170_rdi_0_data,
+ &vfe170_rdi_1_data,
+ &vfe170_rdi_2_data,
+ &vfe170_rdi_3_data,
+ },
+ },
+ .mux_type = {
+ CAM_VFE_RDI_VER_1_0,
+ CAM_VFE_RDI_VER_1_0,
+ CAM_VFE_RDI_VER_1_0,
+ CAM_VFE_RDI_VER_1_0,
+ },
+};
+
+static struct cam_irq_register_set vfe170_bus_irq_reg[3] = {
+ {
+ .mask_reg_offset = 0x00002044,
+ .clear_reg_offset = 0x00002050,
+ .status_reg_offset = 0x0000205C,
+ },
+ {
+ .mask_reg_offset = 0x00002048,
+ .clear_reg_offset = 0x00002054,
+ .status_reg_offset = 0x00002060,
+ },
+ {
+ .mask_reg_offset = 0x0000204C,
+ .clear_reg_offset = 0x00002058,
+ .status_reg_offset = 0x00002064,
+ },
+};
+
+static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = {
+ .common_reg = {
+ .hw_version = 0x00002000,
+ .hw_capability = 0x00002004,
+ .sw_reset = 0x00002008,
+ .cgc_ovd = 0x0000200C,
+ .pwr_iso_cfg = 0x000020CC,
+ .dual_master_comp_cfg = 0x00002028,
+ .irq_reg_info = {
+ .num_registers = 3,
+ .irq_reg_set = vfe170_bus_irq_reg,
+ .global_clear_offset = 0x00002068,
+ .global_clear_bitmask = 0x00000001,
+ },
+ .comp_error_status = 0x0000206C,
+ .comp_ovrwr_status = 0x00002070,
+ .dual_comp_error_status = 0x00002074,
+ .dual_comp_ovrwr_status = 0x00002078,
+ .addr_sync_cfg = 0x0000207C,
+ .addr_sync_frame_hdr = 0x00002080,
+ .addr_sync_no_sync = 0x00002084,
+ },
+ .num_client = 4,
+ .bus_client_reg = {
+ /* BUS Client 0 */
+ {
+ .status0 = 0x00002200,
+ .status1 = 0x00002204,
+ .cfg = 0x00002208,
+ .header_addr = 0x0000220C,
+ .header_cfg = 0x00002210,
+ .image_addr = 0x00002214,
+ .image_addr_offset = 0x00002218,
+ .buffer_width_cfg = 0x0000221C,
+ .buffer_height_cfg = 0x00002220,
+ .packer_cfg = 0x00002224,
+ .stride = 0x00002228,
+ .irq_subsample_period = 0x00002248,
+ .irq_subsample_pattern = 0x0000224C,
+ .framedrop_period = 0x00002250,
+ .framedrop_pattern = 0x00002254,
+ .frame_inc = 0x00002258,
+ .burst_limit = 0x0000225C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 1 */
+ {
+ .status0 = 0x00002300,
+ .status1 = 0x00002304,
+ .cfg = 0x00002308,
+ .header_addr = 0x0000230C,
+ .header_cfg = 0x00002310,
+ .image_addr = 0x00002314,
+ .image_addr_offset = 0x00002318,
+ .buffer_width_cfg = 0x0000231C,
+ .buffer_height_cfg = 0x00002320,
+ .packer_cfg = 0x00002324,
+ .stride = 0x00002328,
+ .irq_subsample_period = 0x00002348,
+ .irq_subsample_pattern = 0x0000234C,
+ .framedrop_period = 0x00002350,
+ .framedrop_pattern = 0x00002354,
+ .frame_inc = 0x00002358,
+ .burst_limit = 0x0000235C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 2 */
+ {
+ .status0 = 0x00002400,
+ .status1 = 0x00002404,
+ .cfg = 0x00002408,
+ .header_addr = 0x0000240C,
+ .header_cfg = 0x00002410,
+ .image_addr = 0x00002414,
+ .image_addr_offset = 0x00002418,
+ .buffer_width_cfg = 0x0000241C,
+ .buffer_height_cfg = 0x00002420,
+ .packer_cfg = 0x00002424,
+ .stride = 0x00002428,
+ .irq_subsample_period = 0x00002448,
+ .irq_subsample_pattern = 0x0000244C,
+ .framedrop_period = 0x00002450,
+ .framedrop_pattern = 0x00002454,
+ .frame_inc = 0x00002458,
+ .burst_limit = 0x0000245C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 3 */
+ {
+ .status0 = 0x00002500,
+ .status1 = 0x00002504,
+ .cfg = 0x00002508,
+ .header_addr = 0x0000250C,
+ .header_cfg = 0x00002510,
+ .image_addr = 0x00002514,
+ .image_addr_offset = 0x00002518,
+ .buffer_width_cfg = 0x0000251C,
+ .buffer_height_cfg = 0x00002520,
+ .packer_cfg = 0x00002524,
+ .stride = 0x00002528,
+ .irq_subsample_period = 0x00002548,
+ .irq_subsample_pattern = 0x0000254C,
+ .framedrop_period = 0x00002550,
+ .framedrop_pattern = 0x00002554,
+ .frame_inc = 0x00002558,
+ .burst_limit = 0x0000255C,
+ .ubwc_regs = NULL,
+ },
+ },
+ .comp_grp_reg = {
+ /* CAM_VFE_BUS_VER2_COMP_GRP_0 */
+ {
+ .comp_mask = 0x00002010,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_1 */
+ {
+ .comp_mask = 0x00002014,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_2 */
+ {
+ .comp_mask = 0x00002018,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_3 */
+ {
+ .comp_mask = 0x0000201C,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_4 */
+ {
+ .comp_mask = 0x00002020,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_5 */
+ {
+ .comp_mask = 0x00002024,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */
+ {
+ .comp_mask = 0x0000202C,
+ .addr_sync_mask = 0x00002088,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */
+ {
+ .comp_mask = 0x00002030,
+ .addr_sync_mask = 0x0000208C,
+
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */
+ {
+ .comp_mask = 0x00002034,
+ .addr_sync_mask = 0x00002090,
+
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */
+ {
+ .comp_mask = 0x00002038,
+ .addr_sync_mask = 0x00002094,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */
+ {
+ .comp_mask = 0x0000203C,
+ .addr_sync_mask = 0x00002098,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */
+ {
+ .comp_mask = 0x00002040,
+ .addr_sync_mask = 0x0000209C,
+ },
+ },
+ .num_out = 4,
+ .vfe_out_hw_info = {
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI3,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ },
+};
+
+static struct cam_vfe_hw_info cam_vfe_lite170_hw_info = {
+ .irq_reg_info = &vfe170_top_irq_reg_info,
+
+ .bus_version = CAM_VFE_BUS_VER_2_0,
+ .bus_hw_info = &vfe170_bus_hw_info,
+
+ .top_version = CAM_VFE_TOP_VER_2_0,
+ .top_hw_info = &vfe170_top_hw_info,
+
+};
+
+#endif /* _CAM_VFE_LITE170_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile
new file mode 100644
index 000000000000..ea07ed3ed495
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe17x.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h
new file mode 100644
index 000000000000..88d5b132ad3d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h
@@ -0,0 +1,850 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE170_H_
+#define _CAM_VFE170_H_
+
+#include "cam_vfe_camif_ver2.h"
+#include "cam_vfe_bus_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_core.h"
+
+static struct cam_irq_register_set vfe170_top_irq_reg_set[2] = {
+ {
+ .mask_reg_offset = 0x0000005C,
+ .clear_reg_offset = 0x00000064,
+ .status_reg_offset = 0x0000006C,
+ },
+ {
+ .mask_reg_offset = 0x00000060,
+ .clear_reg_offset = 0x00000068,
+ .status_reg_offset = 0x00000070,
+ },
+};
+
+static struct cam_irq_controller_reg_info vfe170_top_irq_reg_info = {
+ .num_registers = 2,
+ .irq_reg_set = vfe170_top_irq_reg_set,
+ .global_clear_offset = 0x00000058,
+ .global_clear_bitmask = 0x00000001,
+};
+
+static struct cam_vfe_camif_ver2_reg vfe170_camif_reg = {
+ .camif_cmd = 0x00000478,
+ .camif_config = 0x0000047C,
+ .line_skip_pattern = 0x00000488,
+ .pixel_skip_pattern = 0x0000048C,
+ .skip_period = 0x00000490,
+ .irq_subsample_pattern = 0x0000049C,
+ .epoch_irq = 0x000004A0,
+ .raw_crop_width_cfg = 0x00000CE4,
+ .raw_crop_height_cfg = 0x00000CE8,
+ .reg_update_cmd = 0x000004AC,
+ .vfe_diag_config = 0x00000C48,
+ .vfe_diag_sensor_status = 0x00000C4C,
+};
+
+static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = {
+ .raw_crop_first_pixel_shift = 16,
+ .raw_crop_first_pixel_mask = 0xFFFF,
+ .raw_crop_last_pixel_shift = 0x0,
+ .raw_crop_last_pixel_mask = 0x3FFF,
+ .raw_crop_first_line_shift = 16,
+ .raw_crop_first_line_mask = 0xFFFF,
+ .raw_crop_last_line_shift = 0,
+ .raw_crop_last_line_mask = 0x3FFF,
+ .input_mux_sel_shift = 5,
+ .input_mux_sel_mask = 0x3,
+ .extern_reg_update_shift = 4,
+ .extern_reg_update_mask = 1,
+ .pixel_pattern_shift = 0,
+ .pixel_pattern_mask = 0x7,
+ .dsp_mode_shift = 23,
+ .dsp_mode_mask = 0x1,
+ .dsp_en_shift = 3,
+ .dsp_en_mask = 0x1,
+ .reg_update_cmd_data = 0x1,
+ .epoch_line_cfg = 0x00140014,
+ .sof_irq_mask = 0x00000001,
+ .epoch0_irq_mask = 0x00000004,
+ .reg_update_irq_mask = 0x00000010,
+ .eof_irq_mask = 0x00000002,
+ .error_irq_mask0 = 0x0003FC00,
+ .error_irq_mask1 = 0x0FFF7E80,
+ .enable_diagnostic_hw = 0x1,
+};
+
+struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = {
+ .reset = 0x0000001C,
+ .cgc_ovd = 0x0000002C,
+ .enable = 0x00000040,
+};
+
+struct cam_vfe_top_ver2_reg_offset_module_ctrl stats_170_reg = {
+ .reset = 0x00000020,
+ .cgc_ovd = 0x00000030,
+ .enable = 0x00000044,
+};
+
+struct cam_vfe_top_ver2_reg_offset_module_ctrl color_170_reg = {
+ .reset = 0x00000024,
+ .cgc_ovd = 0x00000034,
+ .enable = 0x00000048,
+};
+
+struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_170_reg = {
+ .reset = 0x00000028,
+ .cgc_ovd = 0x00000038,
+ .enable = 0x0000004C,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_common vfe170_top_common_reg = {
+ .hw_version = 0x00000000,
+ .hw_capability = 0x00000004,
+ .lens_feature = 0x00000008,
+ .stats_feature = 0x0000000C,
+ .color_feature = 0x00000010,
+ .zoom_feature = 0x00000014,
+ .global_reset_cmd = 0x00000018,
+ .module_ctrl = {
+ &lens_170_reg,
+ &stats_170_reg,
+ &color_170_reg,
+ &zoom_170_reg,
+ },
+ .bus_cgc_ovd = 0x0000003C,
+ .core_cfg = 0x00000050,
+ .three_D_cfg = 0x00000054,
+ .violation_status = 0x0000007C,
+ .reg_update_cmd = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_ver2_reg vfe170_rdi_reg = {
+ .reg_update_cmd = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_0_data = {
+ .reg_update_cmd_data = 0x2,
+ .sof_irq_mask = 0x8000000,
+ .reg_update_irq_mask = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_1_data = {
+ .reg_update_cmd_data = 0x4,
+ .sof_irq_mask = 0x10000000,
+ .reg_update_irq_mask = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_2_data = {
+ .reg_update_cmd_data = 0x8,
+ .sof_irq_mask = 0x20000000,
+ .reg_update_irq_mask = 0x80,
+};
+
+static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = {
+ .common_reg = &vfe170_top_common_reg,
+ .camif_hw_info = {
+ .common_reg = &vfe170_top_common_reg,
+ .camif_reg = &vfe170_camif_reg,
+ .reg_data = &vfe_170_camif_reg_data,
+ },
+ .camif_lite_hw_info = {
+ .common_reg = NULL,
+ .camif_lite_reg = NULL,
+ .reg_data = NULL,
+ },
+ .rdi_hw_info = {
+ .common_reg = &vfe170_top_common_reg,
+ .rdi_reg = &vfe170_rdi_reg,
+ .reg_data = {
+ &vfe_170_rdi_0_data,
+ &vfe_170_rdi_1_data,
+ &vfe_170_rdi_2_data,
+ NULL,
+ },
+ },
+ .mux_type = {
+ CAM_VFE_CAMIF_VER_2_0,
+ CAM_VFE_RDI_VER_1_0,
+ CAM_VFE_RDI_VER_1_0,
+ CAM_VFE_RDI_VER_1_0,
+ },
+};
+
+static struct cam_irq_register_set vfe170_bus_irq_reg[3] = {
+ {
+ .mask_reg_offset = 0x00002044,
+ .clear_reg_offset = 0x00002050,
+ .status_reg_offset = 0x0000205C,
+ },
+ {
+ .mask_reg_offset = 0x00002048,
+ .clear_reg_offset = 0x00002054,
+ .status_reg_offset = 0x00002060,
+ },
+ {
+ .mask_reg_offset = 0x0000204C,
+ .clear_reg_offset = 0x00002058,
+ .status_reg_offset = 0x00002064,
+ },
+};
+
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_3 = {
+ .tile_cfg = 0x0000252C,
+ .h_init = 0x00002530,
+ .v_init = 0x00002534,
+ .meta_addr = 0x00002538,
+ .meta_offset = 0x0000253C,
+ .meta_stride = 0x00002540,
+ .mode_cfg_0 = 0x00002544,
+ .bw_limit = 0x000025A0,
+};
+
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_4 = {
+ .tile_cfg = 0x0000262C,
+ .h_init = 0x00002630,
+ .v_init = 0x00002634,
+ .meta_addr = 0x00002638,
+ .meta_offset = 0x0000263C,
+ .meta_stride = 0x00002640,
+ .mode_cfg_0 = 0x00002644,
+ .bw_limit = 0x000026A0,
+};
+
+static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = {
+ .common_reg = {
+ .hw_version = 0x00002000,
+ .hw_capability = 0x00002004,
+ .sw_reset = 0x00002008,
+ .cgc_ovd = 0x0000200C,
+ .pwr_iso_cfg = 0x000020CC,
+ .dual_master_comp_cfg = 0x00002028,
+ .irq_reg_info = {
+ .num_registers = 3,
+ .irq_reg_set = vfe170_bus_irq_reg,
+ .global_clear_offset = 0x00002068,
+ .global_clear_bitmask = 0x00000001,
+ },
+ .comp_error_status = 0x0000206C,
+ .comp_ovrwr_status = 0x00002070,
+ .dual_comp_error_status = 0x00002074,
+ .dual_comp_ovrwr_status = 0x00002078,
+ .addr_sync_cfg = 0x0000207C,
+ .addr_sync_frame_hdr = 0x00002080,
+ .addr_sync_no_sync = 0x00002084,
+ .debug_status_cfg = 0x0000226C,
+ .debug_status_0 = 0x00002270,
+ },
+ .num_client = 20,
+ .bus_client_reg = {
+ /* BUS Client 0 */
+ {
+ .status0 = 0x00002200,
+ .status1 = 0x00002204,
+ .cfg = 0x00002208,
+ .header_addr = 0x0000220C,
+ .header_cfg = 0x00002210,
+ .image_addr = 0x00002214,
+ .image_addr_offset = 0x00002218,
+ .buffer_width_cfg = 0x0000221C,
+ .buffer_height_cfg = 0x00002220,
+ .packer_cfg = 0x00002224,
+ .stride = 0x00002228,
+ .irq_subsample_period = 0x00002248,
+ .irq_subsample_pattern = 0x0000224C,
+ .framedrop_period = 0x00002250,
+ .framedrop_pattern = 0x00002254,
+ .frame_inc = 0x00002258,
+ .burst_limit = 0x0000225C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 1 */
+ {
+ .status0 = 0x00002300,
+ .status1 = 0x00002304,
+ .cfg = 0x00002308,
+ .header_addr = 0x0000230C,
+ .header_cfg = 0x00002310,
+ .image_addr = 0x00002314,
+ .image_addr_offset = 0x00002318,
+ .buffer_width_cfg = 0x0000231C,
+ .buffer_height_cfg = 0x00002320,
+ .packer_cfg = 0x00002324,
+ .stride = 0x00002328,
+ .irq_subsample_period = 0x00002348,
+ .irq_subsample_pattern = 0x0000234C,
+ .framedrop_period = 0x00002350,
+ .framedrop_pattern = 0x00002354,
+ .frame_inc = 0x00002358,
+ .burst_limit = 0x0000235C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 2 */
+ {
+ .status0 = 0x00002400,
+ .status1 = 0x00002404,
+ .cfg = 0x00002408,
+ .header_addr = 0x0000240C,
+ .header_cfg = 0x00002410,
+ .image_addr = 0x00002414,
+ .image_addr_offset = 0x00002418,
+ .buffer_width_cfg = 0x0000241C,
+ .buffer_height_cfg = 0x00002420,
+ .packer_cfg = 0x00002424,
+ .stride = 0x00002428,
+ .irq_subsample_period = 0x00002448,
+ .irq_subsample_pattern = 0x0000244C,
+ .framedrop_period = 0x00002450,
+ .framedrop_pattern = 0x00002454,
+ .frame_inc = 0x00002458,
+ .burst_limit = 0x0000245C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 3 */
+ {
+ .status0 = 0x00002500,
+ .status1 = 0x00002504,
+ .cfg = 0x00002508,
+ .header_addr = 0x0000250C,
+ .header_cfg = 0x00002510,
+ .image_addr = 0x00002514,
+ .image_addr_offset = 0x00002518,
+ .buffer_width_cfg = 0x0000251C,
+ .buffer_height_cfg = 0x00002520,
+ .packer_cfg = 0x00002524,
+ .stride = 0x00002528,
+ .irq_subsample_period = 0x00002548,
+ .irq_subsample_pattern = 0x0000254C,
+ .framedrop_period = 0x00002550,
+ .framedrop_pattern = 0x00002554,
+ .frame_inc = 0x00002558,
+ .burst_limit = 0x0000255C,
+ .ubwc_regs = &ubwc_regs_client_3,
+ },
+ /* BUS Client 4 */
+ {
+ .status0 = 0x00002600,
+ .status1 = 0x00002604,
+ .cfg = 0x00002608,
+ .header_addr = 0x0000260C,
+ .header_cfg = 0x00002610,
+ .image_addr = 0x00002614,
+ .image_addr_offset = 0x00002618,
+ .buffer_width_cfg = 0x0000261C,
+ .buffer_height_cfg = 0x00002620,
+ .packer_cfg = 0x00002624,
+ .stride = 0x00002628,
+ .irq_subsample_period = 0x00002648,
+ .irq_subsample_pattern = 0x0000264C,
+ .framedrop_period = 0x00002650,
+ .framedrop_pattern = 0x00002654,
+ .frame_inc = 0x00002658,
+ .burst_limit = 0x0000265C,
+ .ubwc_regs = &ubwc_regs_client_4,
+ },
+ /* BUS Client 5 */
+ {
+ .status0 = 0x00002700,
+ .status1 = 0x00002704,
+ .cfg = 0x00002708,
+ .header_addr = 0x0000270C,
+ .header_cfg = 0x00002710,
+ .image_addr = 0x00002714,
+ .image_addr_offset = 0x00002718,
+ .buffer_width_cfg = 0x0000271C,
+ .buffer_height_cfg = 0x00002720,
+ .packer_cfg = 0x00002724,
+ .stride = 0x00002728,
+ .irq_subsample_period = 0x00002748,
+ .irq_subsample_pattern = 0x0000274C,
+ .framedrop_period = 0x00002750,
+ .framedrop_pattern = 0x00002754,
+ .frame_inc = 0x00002758,
+ .burst_limit = 0x0000275C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 6 */
+ {
+ .status0 = 0x00002800,
+ .status1 = 0x00002804,
+ .cfg = 0x00002808,
+ .header_addr = 0x0000280C,
+ .header_cfg = 0x00002810,
+ .image_addr = 0x00002814,
+ .image_addr_offset = 0x00002818,
+ .buffer_width_cfg = 0x0000281C,
+ .buffer_height_cfg = 0x00002820,
+ .packer_cfg = 0x00002824,
+ .stride = 0x00002828,
+ .irq_subsample_period = 0x00002848,
+ .irq_subsample_pattern = 0x0000284C,
+ .framedrop_period = 0x00002850,
+ .framedrop_pattern = 0x00002854,
+ .frame_inc = 0x00002858,
+ .burst_limit = 0x0000285C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 7 */
+ {
+ .status0 = 0x00002900,
+ .status1 = 0x00002904,
+ .cfg = 0x00002908,
+ .header_addr = 0x0000290C,
+ .header_cfg = 0x00002910,
+ .image_addr = 0x00002914,
+ .image_addr_offset = 0x00002918,
+ .buffer_width_cfg = 0x0000291C,
+ .buffer_height_cfg = 0x00002920,
+ .packer_cfg = 0x00002924,
+ .stride = 0x00002928,
+ .irq_subsample_period = 0x00002948,
+ .irq_subsample_pattern = 0x0000294C,
+ .framedrop_period = 0x00002950,
+ .framedrop_pattern = 0x00002954,
+ .frame_inc = 0x00002958,
+ .burst_limit = 0x0000295C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 8 */
+ {
+ .status0 = 0x00002A00,
+ .status1 = 0x00002A04,
+ .cfg = 0x00002A08,
+ .header_addr = 0x00002A0C,
+ .header_cfg = 0x00002A10,
+ .image_addr = 0x00002A14,
+ .image_addr_offset = 0x00002A18,
+ .buffer_width_cfg = 0x00002A1C,
+ .buffer_height_cfg = 0x00002A20,
+ .packer_cfg = 0x00002A24,
+ .stride = 0x00002A28,
+ .irq_subsample_period = 0x00002A48,
+ .irq_subsample_pattern = 0x00002A4C,
+ .framedrop_period = 0x00002A50,
+ .framedrop_pattern = 0x00002A54,
+ .frame_inc = 0x00002A58,
+ .burst_limit = 0x00002A5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 9 */
+ {
+ .status0 = 0x00002B00,
+ .status1 = 0x00002B04,
+ .cfg = 0x00002B08,
+ .header_addr = 0x00002B0C,
+ .header_cfg = 0x00002B10,
+ .image_addr = 0x00002B14,
+ .image_addr_offset = 0x00002B18,
+ .buffer_width_cfg = 0x00002B1C,
+ .buffer_height_cfg = 0x00002B20,
+ .packer_cfg = 0x00002B24,
+ .stride = 0x00002B28,
+ .irq_subsample_period = 0x00002B48,
+ .irq_subsample_pattern = 0x00002B4C,
+ .framedrop_period = 0x00002B50,
+ .framedrop_pattern = 0x00002B54,
+ .frame_inc = 0x00002B58,
+ .burst_limit = 0x00002B5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 10 */
+ {
+ .status0 = 0x00002C00,
+ .status1 = 0x00002C04,
+ .cfg = 0x00002C08,
+ .header_addr = 0x00002C0C,
+ .header_cfg = 0x00002C10,
+ .image_addr = 0x00002C14,
+ .image_addr_offset = 0x00002C18,
+ .buffer_width_cfg = 0x00002C1C,
+ .buffer_height_cfg = 0x00002C20,
+ .packer_cfg = 0x00002C24,
+ .stride = 0x00002C28,
+ .irq_subsample_period = 0x00002C48,
+ .irq_subsample_pattern = 0x00002C4C,
+ .framedrop_period = 0x00002C50,
+ .framedrop_pattern = 0x00002C54,
+ .frame_inc = 0x00002C58,
+ .burst_limit = 0x00002C5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 11 */
+ {
+ .status0 = 0x00002D00,
+ .status1 = 0x00002D04,
+ .cfg = 0x00002D08,
+ .header_addr = 0x00002D0C,
+ .header_cfg = 0x00002D10,
+ .image_addr = 0x00002D14,
+ .image_addr_offset = 0x00002D18,
+ .buffer_width_cfg = 0x00002D1C,
+ .buffer_height_cfg = 0x00002D20,
+ .packer_cfg = 0x00002D24,
+ .stride = 0x00002D28,
+ .irq_subsample_period = 0x00002D48,
+ .irq_subsample_pattern = 0x00002D4C,
+ .framedrop_period = 0x00002D50,
+ .framedrop_pattern = 0x00002D54,
+ .frame_inc = 0x00002D58,
+ .burst_limit = 0x00002D5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 12 */
+ {
+ .status0 = 0x00002E00,
+ .status1 = 0x00002E04,
+ .cfg = 0x00002E08,
+ .header_addr = 0x00002E0C,
+ .header_cfg = 0x00002E10,
+ .image_addr = 0x00002E14,
+ .image_addr_offset = 0x00002E18,
+ .buffer_width_cfg = 0x00002E1C,
+ .buffer_height_cfg = 0x00002E20,
+ .packer_cfg = 0x00002E24,
+ .stride = 0x00002E28,
+ .irq_subsample_period = 0x00002E48,
+ .irq_subsample_pattern = 0x00002E4C,
+ .framedrop_period = 0x00002E50,
+ .framedrop_pattern = 0x00002E54,
+ .frame_inc = 0x00002E58,
+ .burst_limit = 0x00002E5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 13 */
+ {
+ .status0 = 0x00002F00,
+ .status1 = 0x00002F04,
+ .cfg = 0x00002F08,
+ .header_addr = 0x00002F0C,
+ .header_cfg = 0x00002F10,
+ .image_addr = 0x00002F14,
+ .image_addr_offset = 0x00002F18,
+ .buffer_width_cfg = 0x00002F1C,
+ .buffer_height_cfg = 0x00002F20,
+ .packer_cfg = 0x00002F24,
+ .stride = 0x00002F28,
+ .irq_subsample_period = 0x00002F48,
+ .irq_subsample_pattern = 0x00002F4C,
+ .framedrop_period = 0x00002F50,
+ .framedrop_pattern = 0x00002F54,
+ .frame_inc = 0x00002F58,
+ .burst_limit = 0x00002F5C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 14 */
+ {
+ .status0 = 0x00003000,
+ .status1 = 0x00003004,
+ .cfg = 0x00003008,
+ .header_addr = 0x0000300C,
+ .header_cfg = 0x00003010,
+ .image_addr = 0x00003014,
+ .image_addr_offset = 0x00003018,
+ .buffer_width_cfg = 0x0000301C,
+ .buffer_height_cfg = 0x00003020,
+ .packer_cfg = 0x00003024,
+ .stride = 0x00003028,
+ .irq_subsample_period = 0x00003048,
+ .irq_subsample_pattern = 0x0000304C,
+ .framedrop_period = 0x00003050,
+ .framedrop_pattern = 0x00003054,
+ .frame_inc = 0x00003058,
+ .burst_limit = 0x0000305C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 15 */
+ {
+ .status0 = 0x00003100,
+ .status1 = 0x00003104,
+ .cfg = 0x00003108,
+ .header_addr = 0x0000310C,
+ .header_cfg = 0x00003110,
+ .image_addr = 0x00003114,
+ .image_addr_offset = 0x00003118,
+ .buffer_width_cfg = 0x0000311C,
+ .buffer_height_cfg = 0x00003120,
+ .packer_cfg = 0x00003124,
+ .stride = 0x00003128,
+ .irq_subsample_period = 0x00003148,
+ .irq_subsample_pattern = 0x0000314C,
+ .framedrop_period = 0x00003150,
+ .framedrop_pattern = 0x00003154,
+ .frame_inc = 0x00003158,
+ .burst_limit = 0x0000315C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 16 */
+ {
+ .status0 = 0x00003200,
+ .status1 = 0x00003204,
+ .cfg = 0x00003208,
+ .header_addr = 0x0000320C,
+ .header_cfg = 0x00003210,
+ .image_addr = 0x00003214,
+ .image_addr_offset = 0x00003218,
+ .buffer_width_cfg = 0x0000321C,
+ .buffer_height_cfg = 0x00003220,
+ .packer_cfg = 0x00003224,
+ .stride = 0x00003228,
+ .irq_subsample_period = 0x00003248,
+ .irq_subsample_pattern = 0x0000324C,
+ .framedrop_period = 0x00003250,
+ .framedrop_pattern = 0x00003254,
+ .frame_inc = 0x00003258,
+ .burst_limit = 0x0000325C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 17 */
+ {
+ .status0 = 0x00003300,
+ .status1 = 0x00003304,
+ .cfg = 0x00003308,
+ .header_addr = 0x0000330C,
+ .header_cfg = 0x00003310,
+ .image_addr = 0x00003314,
+ .image_addr_offset = 0x00003318,
+ .buffer_width_cfg = 0x0000331C,
+ .buffer_height_cfg = 0x00003320,
+ .packer_cfg = 0x00003324,
+ .stride = 0x00003328,
+ .irq_subsample_period = 0x00003348,
+ .irq_subsample_pattern = 0x0000334C,
+ .framedrop_period = 0x00003350,
+ .framedrop_pattern = 0x00003354,
+ .frame_inc = 0x00003358,
+ .burst_limit = 0x0000335C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 18 */
+ {
+ .status0 = 0x00003400,
+ .status1 = 0x00003404,
+ .cfg = 0x00003408,
+ .header_addr = 0x0000340C,
+ .header_cfg = 0x00003410,
+ .image_addr = 0x00003414,
+ .image_addr_offset = 0x00003418,
+ .buffer_width_cfg = 0x0000341C,
+ .buffer_height_cfg = 0x00003420,
+ .packer_cfg = 0x00003424,
+ .stride = 0x00003428,
+ .irq_subsample_period = 0x00003448,
+ .irq_subsample_pattern = 0x0000344C,
+ .framedrop_period = 0x00003450,
+ .framedrop_pattern = 0x00003454,
+ .frame_inc = 0x00003458,
+ .burst_limit = 0x0000345C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 19 */
+ {
+ .status0 = 0x00003500,
+ .status1 = 0x00003504,
+ .cfg = 0x00003508,
+ .header_addr = 0x0000350C,
+ .header_cfg = 0x00003510,
+ .image_addr = 0x00003514,
+ .image_addr_offset = 0x00003518,
+ .buffer_width_cfg = 0x0000351C,
+ .buffer_height_cfg = 0x00003520,
+ .packer_cfg = 0x00003524,
+ .stride = 0x00003528,
+ .irq_subsample_period = 0x00003548,
+ .irq_subsample_pattern = 0x0000354C,
+ .framedrop_period = 0x00003550,
+ .framedrop_pattern = 0x00003554,
+ .frame_inc = 0x00003558,
+ .burst_limit = 0x0000355C,
+ .ubwc_regs = NULL,
+ },
+ },
+ .comp_grp_reg = {
+ /* CAM_VFE_BUS_VER2_COMP_GRP_0 */
+ {
+ .comp_mask = 0x00002010,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_1 */
+ {
+ .comp_mask = 0x00002014,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_2 */
+ {
+ .comp_mask = 0x00002018,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_3 */
+ {
+ .comp_mask = 0x0000201C,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_4 */
+ {
+ .comp_mask = 0x00002020,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_5 */
+ {
+ .comp_mask = 0x00002024,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */
+ {
+ .comp_mask = 0x0000202C,
+ .addr_sync_mask = 0x00002088,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */
+ {
+ .comp_mask = 0x00002030,
+ .addr_sync_mask = 0x0000208C,
+
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */
+ {
+ .comp_mask = 0x00002034,
+ .addr_sync_mask = 0x00002090,
+
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */
+ {
+ .comp_mask = 0x00002038,
+ .addr_sync_mask = 0x00002094,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */
+ {
+ .comp_mask = 0x0000203C,
+ .addr_sync_mask = 0x00002098,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */
+ {
+ .comp_mask = 0x00002040,
+ .addr_sync_mask = 0x0000209C,
+ },
+ },
+ .num_out = 18,
+ .vfe_out_hw_info = {
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FULL,
+ .max_width = 4096,
+ .max_height = 4096,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS4,
+ .max_width = 1920,
+ .max_height = 1080,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS16,
+ .max_width = 1920,
+ .max_height = 1080,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_FD,
+ .max_width = 1920,
+ .max_height = 1080,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_PDAF,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+ .max_width = 1920,
+ .max_height = 1080,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ },
+};
+
+/*
+ * Full VFE170 HW description handed to the core via the of_device_id
+ * match table: IRQ layout, BUS v2.0 and TOP v2.0 blobs, and the CAMIF
+ * register set.  This core has no CAMIF-lite, hence version 0 / NULL.
+ */
+struct cam_vfe_hw_info cam_vfe170_hw_info = {
+	.irq_reg_info                  = &vfe170_top_irq_reg_info,
+
+	.bus_version                   = CAM_VFE_BUS_VER_2_0,
+	.bus_hw_info                   = &vfe170_bus_hw_info,
+
+	.top_version                   = CAM_VFE_TOP_VER_2_0,
+	.top_hw_info                   = &vfe170_top_hw_info,
+
+	.camif_version                 = CAM_VFE_CAMIF_VER_2_0,
+	.camif_reg                     = &vfe170_camif_reg,
+
+	.camif_lite_version            = 0,
+	/* Bug fix: this field was ".camif_reg = NULL", a duplicate
+	 * designated initializer that silently overrode the valid
+	 * &vfe170_camif_reg assignment above (last initializer wins).
+	 */
+	.camif_lite_reg                = NULL,
+
+};
+
+#endif /* _CAM_VFE170_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c
new file mode 100644
index 000000000000..6ec5ffbde4e0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c
@@ -0,0 +1,57 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_vfe170.h"
+#include "cam_vfe_lite17x.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_dev.h"
+
+/*
+ * Device-tree match table: maps each supported compatible string to the
+ * static hw_info blob (register offsets, bus/top versions) consumed at
+ * probe time via the of_device_id .data pointer.
+ */
+static const struct of_device_id cam_vfe_dt_match[] = {
+	{
+		.compatible = "qcom,vfe170",
+		.data = &cam_vfe170_hw_info,
+	},
+	{
+		.compatible = "qcom,vfe-lite170",
+		.data = &cam_vfe_lite17x_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_vfe_dt_match);
+
+/*
+ * Platform driver glue.  cam_vfe_probe()/cam_vfe_remove() are the
+ * common VFE entry points implemented elsewhere (cam_vfe_dev.h).
+ */
+static struct platform_driver cam_vfe_driver = {
+	.probe = cam_vfe_probe,
+	.remove = cam_vfe_remove,
+	.driver = {
+		.name = "cam_vfe17x",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_vfe_dt_match,
+		/* disallow manual bind/unbind through sysfs */
+		.suppress_bind_attrs = true,
+	},
+};
+
+/* Module entry: register the VFE17x platform driver with the core. */
+static int __init cam_vfe_init_module(void)
+{
+	return platform_driver_register(&cam_vfe_driver);
+}
+
+/* Module exit: unregister the driver; device teardown runs via .remove. */
+static void __exit cam_vfe_exit_module(void)
+{
+	platform_driver_unregister(&cam_vfe_driver);
+}
+
+module_init(cam_vfe_init_module);
+module_exit(cam_vfe_exit_module);
+MODULE_DESCRIPTION("CAM VFE17X driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h
new file mode 100644
index 000000000000..9767f9716581
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h
@@ -0,0 +1,336 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_LITE17X_H_
+#define _CAM_VFE_LITE17X_H_
+
+#include "cam_vfe_bus_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_core.h"
+
+/*
+ * Per-register IRQ mask/clear/status offsets for the two top-level IRQ
+ * registers of the VFE-lite core (offsets relative to the VFE MMIO base).
+ */
+static struct cam_irq_register_set vfe17x_top_irq_reg_set[2] = {
+	{
+		.mask_reg_offset   = 0x0000005C,
+		.clear_reg_offset  = 0x00000064,
+		.status_reg_offset = 0x0000006C,
+	},
+	{
+		.mask_reg_offset   = 0x00000060,
+		.clear_reg_offset  = 0x00000068,
+		.status_reg_offset = 0x00000070,
+	},
+};
+
+/*
+ * Top-level IRQ controller description: two IRQ registers plus the
+ * global-clear command register (write bit 0 to latch the clears).
+ */
+static struct cam_irq_controller_reg_info vfe17x_top_irq_reg_info = {
+	.num_registers = 2,
+	.irq_reg_set = vfe17x_top_irq_reg_set,
+	.global_clear_offset  = 0x00000058,
+	.global_clear_bitmask = 0x00000001,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_common vfe17x_top_common_reg = {
+ .hw_version = 0x00000000,
+ .hw_capability = 0x00000004,
+ .lens_feature = 0x00000008,
+ .stats_feature = 0x0000000C,
+ .color_feature = 0x00000010,
+ .zoom_feature = 0x00000014,
+ .global_reset_cmd = 0x00000018,
+ .module_ctrl = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+ .bus_cgc_ovd = 0x0000003C,
+ .core_cfg = 0x00000000,
+ .three_D_cfg = 0x00000000,
+ .violation_status = 0x0000007C,
+ .reg_update_cmd = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_ver2_reg vfe17x_rdi_reg = {
+ .reg_update_cmd = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_reg_data vfe17x_rdi_0_data = {
+ .reg_update_cmd_data = 0x2,
+ .sof_irq_mask = 0x8000000,
+ .reg_update_irq_mask = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data vfe17x_rdi_1_data = {
+ .reg_update_cmd_data = 0x4,
+ .sof_irq_mask = 0x10000000,
+ .reg_update_irq_mask = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data vfe17x_rdi_2_data = {
+ .reg_update_cmd_data = 0x8,
+ .sof_irq_mask = 0x20000000,
+ .reg_update_irq_mask = 0x80,
+};
+
+static struct cam_vfe_rdi_reg_data vfe17x_rdi_3_data = {
+ .reg_update_cmd_data = 0x10,
+ .sof_irq_mask = 0x40000000,
+ .reg_update_irq_mask = 0x100,
+};
+
+static struct cam_vfe_top_ver2_hw_info vfe17x_top_hw_info = {
+ .common_reg = &vfe17x_top_common_reg,
+ .camif_hw_info = {
+ .common_reg = NULL,
+ .camif_reg = NULL,
+ .reg_data = NULL,
+ },
+ .rdi_hw_info = {
+ .common_reg = &vfe17x_top_common_reg,
+ .rdi_reg = &vfe17x_rdi_reg,
+ .reg_data = {
+ &vfe17x_rdi_0_data,
+ &vfe17x_rdi_1_data,
+ &vfe17x_rdi_2_data,
+ &vfe17x_rdi_3_data,
+ },
+ },
+ .mux_type = {
+ CAM_VFE_RDI_VER_1_0,
+ CAM_VFE_RDI_VER_1_0,
+ CAM_VFE_RDI_VER_1_0,
+ CAM_VFE_RDI_VER_1_0,
+ },
+};
+
+static struct cam_irq_register_set vfe17x_bus_irq_reg[3] = {
+ {
+ .mask_reg_offset = 0x00002044,
+ .clear_reg_offset = 0x00002050,
+ .status_reg_offset = 0x0000205C,
+ },
+ {
+ .mask_reg_offset = 0x00002048,
+ .clear_reg_offset = 0x00002054,
+ .status_reg_offset = 0x00002060,
+ },
+ {
+ .mask_reg_offset = 0x0000204C,
+ .clear_reg_offset = 0x00002058,
+ .status_reg_offset = 0x00002064,
+ },
+};
+
+static struct cam_vfe_bus_ver2_hw_info vfe17x_bus_hw_info = {
+ .common_reg = {
+ .hw_version = 0x00002000,
+ .hw_capability = 0x00002004,
+ .sw_reset = 0x00002008,
+ .cgc_ovd = 0x0000200C,
+ .pwr_iso_cfg = 0x000020CC,
+ .dual_master_comp_cfg = 0x00002028,
+ .irq_reg_info = {
+ .num_registers = 3,
+ .irq_reg_set = vfe17x_bus_irq_reg,
+ .global_clear_offset = 0x00002068,
+ .global_clear_bitmask = 0x00000001,
+ },
+ .comp_error_status = 0x0000206C,
+ .comp_ovrwr_status = 0x00002070,
+ .dual_comp_error_status = 0x00002074,
+ .dual_comp_ovrwr_status = 0x00002078,
+ .addr_sync_cfg = 0x0000207C,
+ .addr_sync_frame_hdr = 0x00002080,
+ .addr_sync_no_sync = 0x00002084,
+ },
+ .num_client = 4,
+ .bus_client_reg = {
+ /* BUS Client 0 */
+ {
+ .status0 = 0x00002200,
+ .status1 = 0x00002204,
+ .cfg = 0x00002208,
+ .header_addr = 0x0000220C,
+ .header_cfg = 0x00002210,
+ .image_addr = 0x00002214,
+ .image_addr_offset = 0x00002218,
+ .buffer_width_cfg = 0x0000221C,
+ .buffer_height_cfg = 0x00002220,
+ .packer_cfg = 0x00002224,
+ .stride = 0x00002228,
+ .irq_subsample_period = 0x00002248,
+ .irq_subsample_pattern = 0x0000224C,
+ .framedrop_period = 0x00002250,
+ .framedrop_pattern = 0x00002254,
+ .frame_inc = 0x00002258,
+ .burst_limit = 0x0000225C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 1 */
+ {
+ .status0 = 0x00002300,
+ .status1 = 0x00002304,
+ .cfg = 0x00002308,
+ .header_addr = 0x0000230C,
+ .header_cfg = 0x00002310,
+ .image_addr = 0x00002314,
+ .image_addr_offset = 0x00002318,
+ .buffer_width_cfg = 0x0000231C,
+ .buffer_height_cfg = 0x00002320,
+ .packer_cfg = 0x00002324,
+ .stride = 0x00002328,
+ .irq_subsample_period = 0x00002348,
+ .irq_subsample_pattern = 0x0000234C,
+ .framedrop_period = 0x00002350,
+ .framedrop_pattern = 0x00002354,
+ .frame_inc = 0x00002358,
+ .burst_limit = 0x0000235C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 2 */
+ {
+ .status0 = 0x00002400,
+ .status1 = 0x00002404,
+ .cfg = 0x00002408,
+ .header_addr = 0x0000240C,
+ .header_cfg = 0x00002410,
+ .image_addr = 0x00002414,
+ .image_addr_offset = 0x00002418,
+ .buffer_width_cfg = 0x0000241C,
+ .buffer_height_cfg = 0x00002420,
+ .packer_cfg = 0x00002424,
+ .stride = 0x00002428,
+ .irq_subsample_period = 0x00002448,
+ .irq_subsample_pattern = 0x0000244C,
+ .framedrop_period = 0x00002450,
+ .framedrop_pattern = 0x00002454,
+ .frame_inc = 0x00002458,
+ .burst_limit = 0x0000245C,
+ .ubwc_regs = NULL,
+ },
+ /* BUS Client 3 */
+ {
+ .status0 = 0x00002500,
+ .status1 = 0x00002504,
+ .cfg = 0x00002508,
+ .header_addr = 0x0000250C,
+ .header_cfg = 0x00002510,
+ .image_addr = 0x00002514,
+ .image_addr_offset = 0x00002518,
+ .buffer_width_cfg = 0x0000251C,
+ .buffer_height_cfg = 0x00002520,
+ .packer_cfg = 0x00002524,
+ .stride = 0x00002528,
+ .irq_subsample_period = 0x00002548,
+ .irq_subsample_pattern = 0x0000254C,
+ .framedrop_period = 0x00002550,
+ .framedrop_pattern = 0x00002554,
+ .frame_inc = 0x00002558,
+ .burst_limit = 0x0000255C,
+ .ubwc_regs = NULL,
+ },
+ },
+ .comp_grp_reg = {
+ /* CAM_VFE_BUS_VER2_COMP_GRP_0 */
+ {
+ .comp_mask = 0x00002010,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_1 */
+ {
+ .comp_mask = 0x00002014,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_2 */
+ {
+ .comp_mask = 0x00002018,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_3 */
+ {
+ .comp_mask = 0x0000201C,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_4 */
+ {
+ .comp_mask = 0x00002020,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_5 */
+ {
+ .comp_mask = 0x00002024,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */
+ {
+ .comp_mask = 0x0000202C,
+ .addr_sync_mask = 0x00002088,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */
+ {
+ .comp_mask = 0x00002030,
+ .addr_sync_mask = 0x0000208C,
+
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */
+ {
+ .comp_mask = 0x00002034,
+ .addr_sync_mask = 0x00002090,
+
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */
+ {
+ .comp_mask = 0x00002038,
+ .addr_sync_mask = 0x00002094,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */
+ {
+ .comp_mask = 0x0000203C,
+ .addr_sync_mask = 0x00002098,
+ },
+ /* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */
+ {
+ .comp_mask = 0x00002040,
+ .addr_sync_mask = 0x0000209C,
+ },
+ },
+ .num_out = 4,
+ .vfe_out_hw_info = {
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ {
+ .vfe_out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI3,
+ .max_width = -1,
+ .max_height = -1,
+ },
+ },
+};
+
+/*
+ * Full VFE-lite HW description.  The lite core is an RDI-only datapath
+ * (4 RDI muxes, 4 bus clients), so the camif_version/camif_reg and
+ * camif_lite_* fields are intentionally left zero-initialized here.
+ */
+static struct cam_vfe_hw_info cam_vfe_lite17x_hw_info = {
+	.irq_reg_info = &vfe17x_top_irq_reg_info,
+
+	.bus_version = CAM_VFE_BUS_VER_2_0,
+	.bus_hw_info = &vfe17x_bus_hw_info,
+
+	.top_version = CAM_VFE_TOP_VER_2_0,
+	.top_hw_info = &vfe17x_top_hw_info,
+
+};
+
+#endif /* _CAM_VFE_LITE17X_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
new file mode 100644
index 000000000000..1823497221ba
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_bus.o cam_vfe_bus_ver2.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
new file mode 100644
index 000000000000..4c0c147f4122
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
@@ -0,0 +1,55 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_vfe_bus.h"
+#include "cam_vfe_bus_ver1.h"
+#include "cam_vfe_bus_ver2.h"
+#include "cam_debug_util.h"
+
+/*
+ * Version dispatcher for BUS layer initialization.  Only BUS v2.0 is
+ * wired up in this file; any other version logs an error and the
+ * default rc (-ENODEV) is returned.  On success *vfe_bus points at the
+ * newly created bus object; ownership passes to the caller, which must
+ * release it with cam_vfe_bus_deinit() using the same bus_version.
+ */
+int cam_vfe_bus_init(uint32_t bus_version,
+	struct cam_hw_soc_info *soc_info,
+	struct cam_hw_intf *hw_intf,
+	void *bus_hw_info,
+	void *vfe_irq_controller,
+	struct cam_vfe_bus **vfe_bus)
+{
+	int rc = -ENODEV;
+
+	switch (bus_version) {
+	case CAM_VFE_BUS_VER_2_0:
+		rc = cam_vfe_bus_ver2_init(soc_info, hw_intf, bus_hw_info,
+			vfe_irq_controller, vfe_bus);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Version dispatcher for BUS layer teardown; mirror of
+ * cam_vfe_bus_init().  On success *vfe_bus is released by the ver2
+ * deinit path.  Unsupported versions log an error and return -ENODEV.
+ */
+int cam_vfe_bus_deinit(uint32_t bus_version,
+	struct cam_vfe_bus **vfe_bus)
+{
+	int rc = -ENODEV;
+
+	switch (bus_version) {
+	case CAM_VFE_BUS_VER_2_0:
+		rc = cam_vfe_bus_ver2_deinit(vfe_bus);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h
new file mode 100644
index 000000000000..bc12ac37b180
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_BUS_VER1_H_
+#define _CAM_VFE_BUS_VER1_H_
+
+enum cam_vfe_bus_ver1_pingpong_id {
+ CAM_VFE_BUS_VER1_PING,
+ CAM_VFE_BUS_VER1_PONG,
+ CAM_VFE_BUS_VER1_PINGPONG_MAX,
+};
+
+enum cam_vfe_bus_ver1_wm_type {
+ CAM_VFE_BUS_WM_TYPE_IMAGE,
+ CAM_VFE_BUS_WM_TYPE_STATS,
+ CAM_VFE_BUS_WM_TYPE_MAX,
+};
+
+enum cam_vfe_bus_ver1_comp_grp_type {
+ CAM_VFE_BUS_VER1_COMP_GRP_IMG0,
+ CAM_VFE_BUS_VER1_COMP_GRP_IMG1,
+ CAM_VFE_BUS_VER1_COMP_GRP_IMG2,
+ CAM_VFE_BUS_VER1_COMP_GRP_IMG3,
+ CAM_VFE_BUS_VER1_COMP_GRP_STATS0,
+ CAM_VFE_BUS_VER1_COMP_GRP_STATS1,
+ CAM_VFE_BUS_VER1_COMP_GRP_MAX,
+};
+
+struct cam_vfe_bus_ver1_common_reg {
+ uint32_t cmd_offset;
+ uint32_t cfg_offset;
+ uint32_t io_fmt_offset;
+ uint32_t argb_cfg_offset;
+ uint32_t xbar_cfg0_offset;
+ uint32_t xbar_cfg1_offset;
+ uint32_t xbar_cfg2_offset;
+ uint32_t xbar_cfg3_offset;
+ uint32_t ping_pong_status_reg;
+};
+
+struct cam_vfe_bus_ver1_wm_reg {
+ uint32_t wm_cfg_offset;
+ uint32_t ping_addr_offset;
+ uint32_t ping_max_addr_offset;
+ uint32_t pong_addr_offset;
+ uint32_t pong_max_addr_offset;
+ uint32_t addr_cfg_offset;
+ uint32_t ub_cfg_offset;
+ uint32_t image_size_offset;
+ uint32_t buffer_cfg_offset;
+ uint32_t framedrop_pattern_offset;
+ uint32_t irq_subsample_pattern_offset;
+ uint32_t ping_pong_status_bit; /* 0 - 31 */
+ uint32_t composite_bit; /* 0 -31 */
+};
+
+struct cam_vfe_bus_ver1_wm_resource_data {
+ uint32_t index;
+ uint32_t wm_type;
+ uint32_t res_type;
+
+ uint32_t offset;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t scanline;
+
+ uint32_t burst_len;
+
+ uint32_t framedrop_period;
+ uint32_t framedrop_pattern;
+
+ uint32_t buf_valid[CAM_VFE_BUS_VER1_PINGPONG_MAX];
+ uint32_t ub_size;
+ uint32_t ub_offset;
+
+ struct cam_vfe_bus_ver1_wm_reg hw_regs;
+};
+
+struct cam_vfe_bus_ver1_comp_grp_reg {
+ enum cam_vfe_bus_ver1_comp_grp_type comp_grp_type;
+ uint32_t comp_grp_offset;
+};
+
+struct cam_vfe_bus_ver1_comp_grp {
+ struct cam_vfe_bus_ver1_comp_grp_reg reg_info;
+ struct list_head wm_list;
+ uint32_t cur_bit_mask;
+};
+
+/*
+ * cam_vfe_bus_ver1_init()
+ *
+ * @Brief: Initialize Bus layer
+ *
+ * @mem_base: Mapped base address of register space
+ * @hw_intf: HW Interface of HW to which this resource belongs
+ * @bus_hw_info: BUS HW info that contains details of BUS registers
+ * @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top
+ * level IRQs
+ * @vfe_bus: Pointer to vfe_bus structure which will be filled
+ * and returned on successful initialize
+ */
+int cam_vfe_bus_ver1_init(
+ void __iomem *mem_base,
+ struct cam_hw_intf *hw_intf,
+ void *bus_hw_info,
+ void *vfe_irq_controller,
+ struct cam_vfe_bus **vfe_bus);
+
+#endif /* _CAM_VFE_BUS_VER1_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
new file mode 100644
index 000000000000..7ee9f000a8c0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -0,0 +1,3622 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+#include "cam_hw_intf.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
+#include "cam_vfe_bus.h"
+#include "cam_vfe_bus_ver2.h"
+#include "cam_vfe_core.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+
+static const char drv_name[] = "vfe_bus";
+
+#define CAM_VFE_BUS_IRQ_REG0 0
+#define CAM_VFE_BUS_IRQ_REG1 1
+#define CAM_VFE_BUS_IRQ_REG2 2
+#define CAM_VFE_BUS_IRQ_MAX 3
+
+#define CAM_VFE_BUS_VER2_PAYLOAD_MAX 256
+
+#define CAM_VFE_BUS_SET_DEBUG_REG 0x82
+
+#define CAM_VFE_RDI_BUS_DEFAULT_WIDTH 0xFF01
+#define CAM_VFE_RDI_BUS_DEFAULT_STRIDE 0xFF01
+#define CAM_VFE_BUS_INTRA_CLIENT_MASK 0x3
+#define CAM_VFE_BUS_ADDR_SYNC_INTRA_CLIENT_SHIFT 8
+#define CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL \
+ ((1 << CAM_VFE_BUS_VER2_MAX_CLIENTS) - 1)
+
+/* Round value up to the next multiple of alignment (integer math).
+ * Fix: arguments and the expansion are now fully parenthesized so
+ * expression arguments (e.g. ALIGNUP(a + b, n)) expand correctly.
+ */
+#define ALIGNUP(value, alignment) \
+	((((value) + (alignment) - 1) / (alignment)) * (alignment))
+
+#define MAX_BUF_UPDATE_REG_NUM \
+ ((sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client) + \
+ sizeof(struct cam_vfe_bus_ver2_reg_offset_ubwc_client))/4)
+#define MAX_REG_VAL_PAIR_SIZE \
+ (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
+
+#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val) \
+ do { \
+ buf_array[(index)++] = offset; \
+ buf_array[(index)++] = val; \
+ } while (0)
+
+static uint32_t bus_error_irq_mask[3] = {
+ 0x7800,
+ 0x0000,
+ 0x0040,
+};
+
+enum cam_vfe_bus_packer_format {
+ PACKER_FMT_PLAIN_128 = 0x0,
+ PACKER_FMT_PLAIN_8 = 0x1,
+ PACKER_FMT_PLAIN_16_10BPP = 0x2,
+ PACKER_FMT_PLAIN_16_12BPP = 0x3,
+ PACKER_FMT_PLAIN_16_14BPP = 0x4,
+ PACKER_FMT_PLAIN_16_16BPP = 0x5,
+ PACKER_FMT_ARGB_10 = 0x6,
+ PACKER_FMT_ARGB_12 = 0x7,
+ PACKER_FMT_ARGB_14 = 0x8,
+ PACKER_FMT_PLAIN_32_20BPP = 0x9,
+ PACKER_FMT_PLAIN_64 = 0xA,
+ PACKER_FMT_TP_10 = 0xB,
+ PACKER_FMT_PLAIN_32_32BPP = 0xC,
+ PACKER_FMT_PLAIN_8_ODD_EVEN = 0xD,
+ PACKER_FMT_PLAIN_8_LSB_MSB_10 = 0xE,
+ PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN = 0xF,
+ PACKER_FMT_MAX = 0xF,
+};
+
+enum cam_vfe_bus_comp_grp_id {
+ CAM_VFE_BUS_COMP_GROUP_NONE = -EINVAL,
+ CAM_VFE_BUS_COMP_GROUP_ID_0 = 0x0,
+ CAM_VFE_BUS_COMP_GROUP_ID_1 = 0x1,
+ CAM_VFE_BUS_COMP_GROUP_ID_2 = 0x2,
+ CAM_VFE_BUS_COMP_GROUP_ID_3 = 0x3,
+ CAM_VFE_BUS_COMP_GROUP_ID_4 = 0x4,
+ CAM_VFE_BUS_COMP_GROUP_ID_5 = 0x5,
+};
+
+struct cam_vfe_bus_ver2_common_data {
+ uint32_t core_index;
+ void __iomem *mem_base;
+ struct cam_hw_intf *hw_intf;
+ void *bus_irq_controller;
+ void *vfe_irq_controller;
+ struct cam_vfe_bus_ver2_reg_offset_common *common_reg;
+ uint32_t io_buf_update[
+ MAX_REG_VAL_PAIR_SIZE];
+
+ struct cam_vfe_bus_irq_evt_payload evt_payload[
+ CAM_VFE_BUS_VER2_PAYLOAD_MAX];
+ struct list_head free_payload_list;
+ spinlock_t spin_lock;
+ struct mutex bus_mutex;
+ uint32_t secure_mode;
+ uint32_t num_sec_out;
+ uint32_t addr_no_sync;
+};
+
+struct cam_vfe_bus_ver2_wm_resource_data {
+ uint32_t index;
+ struct cam_vfe_bus_ver2_common_data *common_data;
+ struct cam_vfe_bus_ver2_reg_offset_bus_client *hw_regs;
+ void *ctx;
+
+ uint32_t irq_enabled;
+ bool init_cfg_done;
+ bool hfr_cfg_done;
+
+ uint32_t offset;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t format;
+ enum cam_vfe_bus_packer_format pack_fmt;
+
+ uint32_t burst_len;
+
+ uint32_t en_ubwc;
+ bool ubwc_updated;
+ uint32_t packer_cfg;
+ uint32_t tile_cfg;
+ uint32_t h_init;
+ uint32_t v_init;
+ uint32_t ubwc_meta_stride;
+ uint32_t ubwc_mode_cfg_0;
+ uint32_t ubwc_mode_cfg_1;
+ uint32_t ubwc_meta_offset;
+
+ uint32_t irq_subsample_period;
+ uint32_t irq_subsample_pattern;
+ uint32_t framedrop_period;
+ uint32_t framedrop_pattern;
+
+ uint32_t en_cfg;
+ uint32_t is_dual;
+};
+
+struct cam_vfe_bus_ver2_comp_grp_data {
+ enum cam_vfe_bus_ver2_comp_grp_type comp_grp_type;
+ struct cam_vfe_bus_ver2_common_data *common_data;
+ struct cam_vfe_bus_ver2_reg_offset_comp_grp *hw_regs;
+
+ uint32_t irq_enabled;
+ uint32_t comp_grp_local_idx;
+ uint32_t unique_id;
+
+ uint32_t is_master;
+ uint32_t dual_slave_core;
+ uint32_t intra_client_mask;
+ uint32_t composite_mask;
+ uint32_t addr_sync_mode;
+
+ uint32_t acquire_dev_cnt;
+ uint32_t irq_trigger_cnt;
+
+ void *ctx;
+};
+
+struct cam_vfe_bus_ver2_vfe_out_data {
+ uint32_t out_type;
+ struct cam_vfe_bus_ver2_common_data *common_data;
+
+ uint32_t num_wm;
+ struct cam_isp_resource_node *wm_res[PLANE_MAX];
+
+ struct cam_isp_resource_node *comp_grp;
+ enum cam_isp_hw_sync_mode dual_comp_sync_mode;
+ uint32_t dual_hw_alternate_vfe_id;
+ struct list_head vfe_out_list;
+
+ uint32_t format;
+ uint32_t max_width;
+ uint32_t max_height;
+ struct cam_cdm_utils_ops *cdm_util_ops;
+ uint32_t secure_mode;
+};
+
+struct cam_vfe_bus_ver2_priv {
+ struct cam_vfe_bus_ver2_common_data common_data;
+ uint32_t num_client;
+ uint32_t num_out;
+
+ struct cam_isp_resource_node bus_client[CAM_VFE_BUS_VER2_MAX_CLIENTS];
+ struct cam_isp_resource_node comp_grp[CAM_VFE_BUS_VER2_COMP_GRP_MAX];
+ struct cam_isp_resource_node vfe_out[CAM_VFE_BUS_VER2_VFE_OUT_MAX];
+
+ struct list_head free_comp_grp;
+ struct list_head free_dual_comp_grp;
+ struct list_head used_comp_grp;
+
+ uint32_t irq_handle;
+ uint32_t error_irq_handle;
+ void *tasklet_info;
+};
+
+static int cam_vfe_bus_process_cmd(
+ struct cam_isp_resource_node *priv,
+ uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+
+/*
+ * Pop one free IRQ event payload from the shared pool.
+ * Returns 0 with *evt_payload set, or -ENODEV with *evt_payload = NULL
+ * when the pool is exhausted (rate-limited error log).
+ * NOTE(review): this side takes plain spin_lock while the put side uses
+ * spin_lock_irqsave -- presumably get runs only in (hard)IRQ context
+ * where interrupts are already off; confirm against the IRQ handler.
+ */
+static int cam_vfe_bus_get_evt_payload(
+	struct cam_vfe_bus_ver2_common_data  *common_data,
+	struct cam_vfe_bus_irq_evt_payload  **evt_payload)
+{
+	int rc;
+
+	spin_lock(&common_data->spin_lock);
+	if (list_empty(&common_data->free_payload_list)) {
+		*evt_payload = NULL;
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&common_data->free_payload_list,
+		struct cam_vfe_bus_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	rc = 0;
+done:
+	spin_unlock(&common_data->spin_lock);
+	return rc;
+}
+
+/*
+ * Translate a UAPI composite-group id (CAM_ISP_RES_COMP_GROUP_ID_*)
+ * into the driver-internal enum.  Unknown or NONE inputs map to
+ * CAM_VFE_BUS_COMP_GROUP_NONE (-EINVAL sentinel).
+ */
+static enum cam_vfe_bus_comp_grp_id
+	cam_vfe_bus_comp_grp_id_convert(uint32_t comp_grp)
+{
+	switch (comp_grp) {
+	case CAM_ISP_RES_COMP_GROUP_ID_0:
+		return CAM_VFE_BUS_COMP_GROUP_ID_0;
+	case CAM_ISP_RES_COMP_GROUP_ID_1:
+		return CAM_VFE_BUS_COMP_GROUP_ID_1;
+	case CAM_ISP_RES_COMP_GROUP_ID_2:
+		return CAM_VFE_BUS_COMP_GROUP_ID_2;
+	case CAM_ISP_RES_COMP_GROUP_ID_3:
+		return CAM_VFE_BUS_COMP_GROUP_ID_3;
+	case CAM_ISP_RES_COMP_GROUP_ID_4:
+		return CAM_VFE_BUS_COMP_GROUP_ID_4;
+	case CAM_ISP_RES_COMP_GROUP_ID_5:
+		return CAM_VFE_BUS_COMP_GROUP_ID_5;
+	case CAM_ISP_RES_COMP_GROUP_NONE:
+	default:
+		return CAM_VFE_BUS_COMP_GROUP_NONE;
+	}
+}
+
+/*
+ * Return an IRQ event payload to the free pool.
+ * NOTE(review): if any of the saved BUS status registers is still
+ * non-zero the function returns 0 WITHOUT recycling the payload --
+ * presumably the payload is still being processed and will be put
+ * again later with cleared status; confirm callers guarantee that,
+ * otherwise payloads leak from the fixed-size pool.
+ */
+static int cam_vfe_bus_put_evt_payload(void *core_info,
+	struct cam_vfe_bus_irq_evt_payload **evt_payload)
+{
+	struct cam_vfe_bus_ver2_common_data *common_data = NULL;
+	uint32_t  *ife_irq_regs = NULL;
+	uint32_t   status_reg0, status_reg1, status_reg2;
+	unsigned long flags;
+
+	if (!core_info) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+	(*evt_payload)->error_type = 0;
+	ife_irq_regs = (*evt_payload)->irq_reg_val;
+	status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
+	status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
+	status_reg2 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2];
+
+	if (status_reg0 || status_reg1 || status_reg2) {
+		CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x status2 0x%x",
+			status_reg0, status_reg1, status_reg2);
+		return 0;
+	}
+
+	common_data = core_info;
+
+	/* irqsave variant: this path can race with get() from IRQ context */
+	spin_lock_irqsave(&common_data->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list,
+		&common_data->free_payload_list);
+	spin_unlock_irqrestore(&common_data->spin_lock, flags);
+
+	*evt_payload = NULL;
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+ /*
+  * Compute the intra-client mask that pairs the master and slave VFE
+  * cores in dual-VFE mode.
+  *
+  * @dual_slave_core:   the other (slave) VFE core
+  * @current_core:      the core this bus instance belongs to
+  * @intra_client_mask: out; 0x3 on Titan 170 v100, 0x1 otherwise
+  *
+  * Returns 0 on success, -EINVAL for invalid core combinations or a
+  * negative error from CPAS version lookup.
+  */
+ static int cam_vfe_bus_ver2_get_intra_client_mask(
+ 	enum cam_vfe_bus_ver2_vfe_core_id  dual_slave_core,
+ 	enum cam_vfe_bus_ver2_vfe_core_id  current_core,
+ 	uint32_t                          *intra_client_mask)
+ {
+ 	int rc = 0;
+ 	uint32_t camera_hw_version = 0;
+ 	uint32_t version_based_intra_client_mask = 0x1;
+
+ 	*intra_client_mask = 0;
+
+ 	if (dual_slave_core == current_core) {
+ 		CAM_ERR(CAM_ISP,
+ 			"Invalid params. Same core as Master and Slave");
+ 		return -EINVAL;
+ 	}
+
+ 	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+ 	if (rc) {
+ 		/*
+ 		 * Fix: the return code was previously ignored, letting the
+ 		 * mask be chosen from an unset (zero) HW version.
+ 		 */
+ 		CAM_ERR(CAM_ISP, "Failed to get CPAS HW version rc: %d", rc);
+ 		return rc;
+ 	}
+
+ 	CAM_DBG(CAM_ISP, "CPAS VERSION %d", camera_hw_version);
+
+ 	/* Titan 170 v100 uses a wider intra-client mask */
+ 	switch (camera_hw_version) {
+ 	case CAM_CPAS_TITAN_170_V100:
+ 		version_based_intra_client_mask = 0x3;
+ 		break;
+ 	default:
+ 		version_based_intra_client_mask = 0x1;
+ 		break;
+ 	}
+
+ 	/* Only the CORE_0 <-> CORE_1 pairing is valid in either direction */
+ 	switch (current_core) {
+ 	case CAM_VFE_BUS_VER2_VFE_CORE_0:
+ 		switch (dual_slave_core) {
+ 		case CAM_VFE_BUS_VER2_VFE_CORE_1:
+ 			*intra_client_mask = version_based_intra_client_mask;
+ 			break;
+ 		default:
+ 			CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
+ 				dual_slave_core);
+ 			rc = -EINVAL;
+ 			break;
+ 		}
+ 		break;
+ 	case CAM_VFE_BUS_VER2_VFE_CORE_1:
+ 		switch (dual_slave_core) {
+ 		case CAM_VFE_BUS_VER2_VFE_CORE_0:
+ 			*intra_client_mask = version_based_intra_client_mask;
+ 			break;
+ 		default:
+ 			CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
+ 				dual_slave_core);
+ 			rc = -EINVAL;
+ 			break;
+ 		}
+ 		break;
+ 	default:
+ 		CAM_ERR(CAM_ISP,
+ 			"Invalid value for master core %u", current_core);
+ 		rc = -EINVAL;
+ 		break;
+ 	}
+
+ 	return rc;
+ }
+
+ /*
+  * Report whether a VFE out resource type supports secure (content
+  * protected) mode. Image and RDI paths can be secure; PDAF/2PD and all
+  * stats outputs cannot, and unknown types default to non-secure.
+  */
+ static bool cam_vfe_bus_can_be_secure(uint32_t out_type)
+ {
+ 	return (out_type == CAM_VFE_BUS_VER2_VFE_OUT_FULL)      ||
+ 		(out_type == CAM_VFE_BUS_VER2_VFE_OUT_DS4)       ||
+ 		(out_type == CAM_VFE_BUS_VER2_VFE_OUT_DS16)      ||
+ 		(out_type == CAM_VFE_BUS_VER2_VFE_OUT_FD)        ||
+ 		(out_type == CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP)  ||
+ 		(out_type == CAM_VFE_BUS_VER2_VFE_OUT_RDI0)      ||
+ 		(out_type == CAM_VFE_BUS_VER2_VFE_OUT_RDI1)      ||
+ 		(out_type == CAM_VFE_BUS_VER2_VFE_OUT_RDI2)      ||
+ 		(out_type == CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP) ||
+ 		(out_type == CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP)  ||
+ 		(out_type == CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP);
+ }
+
+ /*
+  * Map a UAPI IFE out resource id (CAM_ISP_IFE_OUT_RES_*) to the
+  * driver-internal VFE out type. Unknown ids map to VFE_OUT_MAX.
+  */
+ static enum cam_vfe_bus_ver2_vfe_out_type
+ 	cam_vfe_bus_get_out_res_id(uint32_t res_type)
+ {
+ 	static const struct {
+ 		uint32_t                            res_type;
+ 		enum cam_vfe_bus_ver2_vfe_out_type  out_type;
+ 	} out_map[] = {
+ 		{CAM_ISP_IFE_OUT_RES_FULL, CAM_VFE_BUS_VER2_VFE_OUT_FULL},
+ 		{CAM_ISP_IFE_OUT_RES_DS4, CAM_VFE_BUS_VER2_VFE_OUT_DS4},
+ 		{CAM_ISP_IFE_OUT_RES_DS16, CAM_VFE_BUS_VER2_VFE_OUT_DS16},
+ 		{CAM_ISP_IFE_OUT_RES_FD, CAM_VFE_BUS_VER2_VFE_OUT_FD},
+ 		{CAM_ISP_IFE_OUT_RES_RAW_DUMP,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP},
+ 		{CAM_ISP_IFE_OUT_RES_PDAF, CAM_VFE_BUS_VER2_VFE_OUT_PDAF},
+ 		{CAM_ISP_IFE_OUT_RES_2PD, CAM_VFE_BUS_VER2_VFE_OUT_2PD},
+ 		{CAM_ISP_IFE_OUT_RES_RDI_0, CAM_VFE_BUS_VER2_VFE_OUT_RDI0},
+ 		{CAM_ISP_IFE_OUT_RES_RDI_1, CAM_VFE_BUS_VER2_VFE_OUT_RDI1},
+ 		{CAM_ISP_IFE_OUT_RES_RDI_2, CAM_VFE_BUS_VER2_VFE_OUT_RDI2},
+ 		{CAM_ISP_IFE_OUT_RES_RDI_3, CAM_VFE_BUS_VER2_VFE_OUT_RDI3},
+ 		{CAM_ISP_IFE_OUT_RES_STATS_HDR_BE,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE},
+ 		{CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST},
+ 		{CAM_ISP_IFE_OUT_RES_STATS_TL_BG,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG},
+ 		{CAM_ISP_IFE_OUT_RES_STATS_BF,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF},
+ 		{CAM_ISP_IFE_OUT_RES_STATS_AWB_BG,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG},
+ 		{CAM_ISP_IFE_OUT_RES_STATS_BHIST,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST},
+ 		{CAM_ISP_IFE_OUT_RES_STATS_RS,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS},
+ 		{CAM_ISP_IFE_OUT_RES_STATS_CS,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS},
+ 		{CAM_ISP_IFE_OUT_RES_STATS_IHIST,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST},
+ 		{CAM_ISP_IFE_OUT_RES_FULL_DISP,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP},
+ 		{CAM_ISP_IFE_OUT_RES_DS4_DISP,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP},
+ 		{CAM_ISP_IFE_OUT_RES_DS16_DISP,
+ 			CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP},
+ 	};
+ 	size_t i;
+
+ 	for (i = 0; i < sizeof(out_map) / sizeof(out_map[0]); i++) {
+ 		if (out_map[i].res_type == res_type)
+ 			return out_map[i].out_type;
+ 	}
+
+ 	return CAM_VFE_BUS_VER2_VFE_OUT_MAX;
+ }
+
+ /*
+  * Return the number of write masters (WMs) required to produce the
+  * given out resource in the given pixel format.
+  *
+  * @res_type: driver-internal VFE out type
+  * @format:   CAM_FORMAT_* pixel format requested by userspace
+  *
+  * Returns 1 or 2 for supported combinations, -EINVAL otherwise. The
+  * exact format matrix below is the supported-capability table of the
+  * ver2 bus hardware; treat any change as a behavior change.
+  */
+ static int cam_vfe_bus_get_num_wm(
+ 	enum cam_vfe_bus_ver2_vfe_out_type    res_type,
+ 	uint32_t                              format)
+ {
+ 	switch (res_type) {
+ 	/* RDI paths: single WM, raw/plain formats only */
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_RDI0:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_RDI1:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_RDI2:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_RDI3:
+ 		switch (format) {
+ 		case CAM_FORMAT_MIPI_RAW_8:
+ 		case CAM_FORMAT_MIPI_RAW_10:
+ 		case CAM_FORMAT_MIPI_RAW_12:
+ 		case CAM_FORMAT_MIPI_RAW_14:
+ 		case CAM_FORMAT_MIPI_RAW_16:
+ 		case CAM_FORMAT_MIPI_RAW_20:
+ 		case CAM_FORMAT_DPCM_10_6_10:
+ 		case CAM_FORMAT_DPCM_10_8_10:
+ 		case CAM_FORMAT_DPCM_12_6_12:
+ 		case CAM_FORMAT_DPCM_12_8_12:
+ 		case CAM_FORMAT_DPCM_14_8_14:
+ 		case CAM_FORMAT_DPCM_14_10_14:
+ 		case CAM_FORMAT_PLAIN8:
+ 		case CAM_FORMAT_PLAIN16_10:
+ 		case CAM_FORMAT_PLAIN16_12:
+ 		case CAM_FORMAT_PLAIN16_14:
+ 		case CAM_FORMAT_PLAIN16_16:
+ 		case CAM_FORMAT_PLAIN32_20:
+ 		case CAM_FORMAT_PLAIN128:
+ 			return 1;
+ 		default:
+ 			break;
+ 		}
+ 		break;
+ 	/* Full-resolution video/display paths: Y + C planes -> 2 WMs */
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_FULL:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP:
+ 		switch (format) {
+ 		case CAM_FORMAT_NV21:
+ 		case CAM_FORMAT_NV12:
+ 		case CAM_FORMAT_MIPI_RAW_8:
+ 		case CAM_FORMAT_PLAIN8:
+ 		case CAM_FORMAT_TP10:
+ 		case CAM_FORMAT_UBWC_NV12:
+ 		case CAM_FORMAT_UBWC_NV12_4R:
+ 		case CAM_FORMAT_UBWC_TP10:
+ 		case CAM_FORMAT_UBWC_P010:
+ 		case CAM_FORMAT_PLAIN16_10:
+ 			return 2;
+ 		default:
+ 			break;
+ 		}
+ 		break;
+ 	/* FD path: 2 WMs for planar YUV, 1 for luma-only */
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_FD:
+ 		switch (format) {
+ 		case CAM_FORMAT_NV21:
+ 		case CAM_FORMAT_NV12:
+ 		case CAM_FORMAT_PLAIN8:
+ 		case CAM_FORMAT_TP10:
+ 		case CAM_FORMAT_PLAIN16_10:
+ 			return 2;
+ 		case CAM_FORMAT_Y_ONLY:
+ 			return 1;
+ 		default:
+ 			break;
+ 		}
+ 		break;
+ 	/* Downscaled (DS4/DS16) paths: PD formats, single WM */
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_DS4:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_DS16:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP:
+ 		switch (format) {
+ 		case CAM_FORMAT_PD8:
+ 		case CAM_FORMAT_PD10:
+ 			return 1;
+ 		default:
+ 			break;
+ 		}
+ 		break;
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP:
+ 		switch (format) {
+ 		case CAM_FORMAT_ARGB_14:
+ 		case CAM_FORMAT_PLAIN8:
+ 		case CAM_FORMAT_PLAIN16_10:
+ 		case CAM_FORMAT_PLAIN16_12:
+ 		case CAM_FORMAT_PLAIN16_14:
+ 			return 1;
+ 		default:
+ 			break;
+ 		}
+ 		break;
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_PDAF:
+ 		switch (format) {
+ 		case CAM_FORMAT_PLAIN8:
+ 		case CAM_FORMAT_PLAIN16_10:
+ 		case CAM_FORMAT_PLAIN16_12:
+ 		case CAM_FORMAT_PLAIN16_14:
+ 			return 1;
+ 		default:
+ 			break;
+ 		}
+ 		break;
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_2PD:
+ 		switch (format) {
+ 		case CAM_FORMAT_PLAIN16_8:
+ 		case CAM_FORMAT_PLAIN16_10:
+ 		case CAM_FORMAT_PLAIN16_12:
+ 		case CAM_FORMAT_PLAIN16_14:
+ 		case CAM_FORMAT_PLAIN16_16:
+ 		case CAM_FORMAT_PLAIN64:
+ 			return 1;
+ 		default:
+ 			break;
+ 		}
+ 		break;
+ 	/* Most stats ports emit PLAIN64 records via one WM */
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
+ 		switch (format) {
+ 		case CAM_FORMAT_PLAIN64:
+ 			return 1;
+ 		default:
+ 			break;
+ 		}
+ 		break;
+ 	/* RS/IHIST stats are PLAIN16_16 */
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS:
+ 	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST:
+ 		switch (format) {
+ 		case CAM_FORMAT_PLAIN16_16:
+ 			return 1;
+ 		default:
+ 			break;
+ 		}
+ 		break;
+ 	default:
+ 		break;
+ 	}
+
+ 	CAM_ERR(CAM_ISP, "Unsupported format %u for resource_type %u",
+ 		format, res_type);
+
+ 	return -EINVAL;
+ }
+
+ /*
+  * Resolve the fixed write-master index for a (VFE out, plane) pair.
+  * The WM assignment is hard-wired on the ver2 bus, so this is a pure
+  * lookup. Returns the WM index, or -1 for unsupported combinations.
+  */
+ static int cam_vfe_bus_get_wm_idx(
+ 	enum cam_vfe_bus_ver2_vfe_out_type vfe_out_res_id,
+ 	enum cam_vfe_bus_plane_type plane)
+ {
+ 	/* Hard-wired (out resource, plane) -> WM index map */
+ 	static const struct {
+ 		enum cam_vfe_bus_ver2_vfe_out_type res;
+ 		enum cam_vfe_bus_plane_type        plane;
+ 		int                                wm_idx;
+ 	} wm_map[] = {
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_RDI0,            PLANE_Y,  0},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_RDI1,            PLANE_Y,  1},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_RDI2,            PLANE_Y,  2},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_RDI3,            PLANE_Y,  3},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_FULL,            PLANE_Y,  3},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_FULL,            PLANE_C,  4},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_DS4,             PLANE_Y,  5},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_DS16,            PLANE_Y,  6},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_FD,              PLANE_Y,  7},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_FD,              PLANE_C,  8},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP,        PLANE_Y,  9},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_PDAF,            PLANE_Y, 10},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_2PD,             PLANE_Y, 10},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,    PLANE_Y, 11},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST, PLANE_Y, 12},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG,     PLANE_Y, 13},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,        PLANE_Y, 14},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,    PLANE_Y, 15},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,     PLANE_Y, 16},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,        PLANE_Y, 17},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,        PLANE_Y, 18},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,     PLANE_Y, 19},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP,       PLANE_Y, 20},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP,       PLANE_C, 21},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP,        PLANE_Y, 22},
+ 		{CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP,       PLANE_Y, 23},
+ 	};
+ 	size_t i;
+
+ 	for (i = 0; i < sizeof(wm_map) / sizeof(wm_map[0]); i++) {
+ 		if (wm_map[i].res == vfe_out_res_id &&
+ 			wm_map[i].plane == plane)
+ 			return wm_map[i].wm_idx;
+ 	}
+
+ 	return -1;
+ }
+
+ /*
+  * Select the bus packer format for an output pixel format.
+  *
+  * @out_fmt:  CAM_FORMAT_* output format
+  * @wm_index: write-master index; used to special-case NV21 on WM
+  *            4/6/21 — presumably the chroma write masters needing
+  *            odd/even byte swap (TODO confirm against HW docs)
+  *
+  * Returns the PACKER_FMT_* value, or PACKER_FMT_MAX when unsupported.
+  */
+ static enum cam_vfe_bus_packer_format
+ 	cam_vfe_bus_get_packer_fmt(uint32_t out_fmt, int wm_index)
+ {
+ 	switch (out_fmt) {
+ 	case CAM_FORMAT_NV21:
+ 		if ((wm_index == 4) || (wm_index == 6) || (wm_index == 21))
+ 			return PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN;
+ 		/* fall through: other WMs pack NV21 like NV12 */
+ 	case CAM_FORMAT_NV12:
+ 	case CAM_FORMAT_UBWC_NV12:
+ 	case CAM_FORMAT_UBWC_NV12_4R:
+ 	case CAM_FORMAT_Y_ONLY:
+ 		return PACKER_FMT_PLAIN_8_LSB_MSB_10;
+ 	case CAM_FORMAT_PLAIN16_16:
+ 		return PACKER_FMT_PLAIN_16_16BPP;
+ 	case CAM_FORMAT_PLAIN64:
+ 		return PACKER_FMT_PLAIN_64;
+ 	case CAM_FORMAT_PLAIN8:
+ 		return PACKER_FMT_PLAIN_8;
+ 	case CAM_FORMAT_PLAIN16_10:
+ 		return PACKER_FMT_PLAIN_16_10BPP;
+ 	case CAM_FORMAT_PLAIN16_12:
+ 		return PACKER_FMT_PLAIN_16_12BPP;
+ 	case CAM_FORMAT_PLAIN16_14:
+ 		return PACKER_FMT_PLAIN_16_14BPP;
+ 	case CAM_FORMAT_PLAIN32_20:
+ 		return PACKER_FMT_PLAIN_32_20BPP;
+ 	/* Raw/MIPI and PD formats are passed through as 128-bit plain */
+ 	case CAM_FORMAT_MIPI_RAW_6:
+ 	case CAM_FORMAT_MIPI_RAW_8:
+ 	case CAM_FORMAT_MIPI_RAW_10:
+ 	case CAM_FORMAT_MIPI_RAW_12:
+ 	case CAM_FORMAT_MIPI_RAW_14:
+ 	case CAM_FORMAT_MIPI_RAW_16:
+ 	case CAM_FORMAT_MIPI_RAW_20:
+ 	case CAM_FORMAT_PLAIN16_8:
+ 	case CAM_FORMAT_PLAIN128:
+ 	case CAM_FORMAT_PD8:
+ 	case CAM_FORMAT_PD10:
+ 		return PACKER_FMT_PLAIN_128;
+ 	case CAM_FORMAT_UBWC_TP10:
+ 	case CAM_FORMAT_TP10:
+ 		return PACKER_FMT_TP_10;
+ 	case CAM_FORMAT_ARGB_14:
+ 		return PACKER_FMT_ARGB_14;
+ 	default:
+ 		return PACKER_FMT_MAX;
+ 	}
+ }
+
+ /*
+  * Reserve and configure the write master for one (out resource, plane)
+  * pair. Resolves the fixed WM index, marks the WM reserved, and derives
+  * per-WM width/height/stride/packing from the requested format.
+  *
+  * @ver2_bus_priv:    bus instance
+  * @out_port_info:    requested format and dimensions
+  * @tasklet/@ctx:     stored for IRQ bottom-half dispatch
+  * @vfe_out_res_id:   driver-internal out type
+  * @plane:            PLANE_Y or PLANE_C
+  * @subscribe_irq:    whether buf-done IRQ should be enabled at start
+  * @wm_res:           out; the reserved WM resource node
+  * @client_done_mask: out; bit for this WM in the bus done mask
+  * @is_dual:          dual-VFE flag, stored in the WM data
+  *
+  * Returns 0 on success, -EINVAL for unsupported combinations,
+  * -EALREADY when the WM is already reserved.
+  */
+ static int cam_vfe_bus_acquire_wm(
+ 	struct cam_vfe_bus_ver2_priv          *ver2_bus_priv,
+ 	struct cam_isp_out_port_info          *out_port_info,
+ 	void                                  *tasklet,
+ 	void                                  *ctx,
+ 	enum cam_vfe_bus_ver2_vfe_out_type     vfe_out_res_id,
+ 	enum cam_vfe_bus_plane_type            plane,
+ 	uint32_t                               subscribe_irq,
+ 	struct cam_isp_resource_node         **wm_res,
+ 	uint32_t                              *client_done_mask,
+ 	uint32_t                               is_dual)
+ {
+ 	/*
+ 	 * Fix: wm_idx was uint32_t, making the "wm_idx < 0" check below
+ 	 * always false even though cam_vfe_bus_get_wm_idx() returns a
+ 	 * signed -1 for unsupported combinations.
+ 	 */
+ 	int wm_idx = 0;
+ 	struct cam_isp_resource_node              *wm_res_local = NULL;
+ 	struct cam_vfe_bus_ver2_wm_resource_data  *rsrc_data = NULL;
+
+ 	*wm_res = NULL;
+ 	*client_done_mask = 0;
+
+ 	/* No need to allocate for BUS VER2. VFE OUT to WM is fixed. */
+ 	wm_idx = cam_vfe_bus_get_wm_idx(vfe_out_res_id, plane);
+ 	if (wm_idx < 0 || wm_idx >= ver2_bus_priv->num_client) {
+ 		CAM_ERR(CAM_ISP, "Unsupported VFE out %d plane %d",
+ 			vfe_out_res_id, plane);
+ 		return -EINVAL;
+ 	}
+
+ 	wm_res_local = &ver2_bus_priv->bus_client[wm_idx];
+ 	if (wm_res_local->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ 		CAM_ERR(CAM_ISP, "WM res not available state:%d",
+ 			wm_res_local->res_state);
+ 		return -EALREADY;
+ 	}
+ 	wm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ 	wm_res_local->tasklet_info = tasklet;
+
+ 	rsrc_data = wm_res_local->res_priv;
+ 	rsrc_data->irq_enabled = subscribe_irq;
+ 	rsrc_data->ctx = ctx;
+ 	rsrc_data->format = out_port_info->format;
+ 	rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format,
+ 		wm_idx);
+
+ 	rsrc_data->width = out_port_info->width;
+ 	rsrc_data->height = out_port_info->height;
+ 	rsrc_data->is_dual = is_dual;
+ 	/* Set WM offset value to default */
+ 	rsrc_data->offset  = 0;
+ 	CAM_DBG(CAM_ISP, "WM %d width %d height %d", rsrc_data->index,
+ 		rsrc_data->width, rsrc_data->height);
+
+ 	if (rsrc_data->index < 3) {
+ 		/* Write master 0-2 refers to RDI 0/ RDI 1/RDI 2 */
+ 		switch (rsrc_data->format) {
+ 		case CAM_FORMAT_MIPI_RAW_6:
+ 		case CAM_FORMAT_MIPI_RAW_8:
+ 		case CAM_FORMAT_MIPI_RAW_10:
+ 		case CAM_FORMAT_MIPI_RAW_12:
+ 		case CAM_FORMAT_MIPI_RAW_14:
+ 		case CAM_FORMAT_MIPI_RAW_16:
+ 		case CAM_FORMAT_MIPI_RAW_20:
+ 		case CAM_FORMAT_PLAIN128:
+ 			rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH;
+ 			rsrc_data->height = 0;
+ 			rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
+ 			rsrc_data->pack_fmt = 0x0;
+ 			rsrc_data->en_cfg = 0x3;
+ 			break;
+ 		case CAM_FORMAT_PLAIN8:
+ 			rsrc_data->en_cfg = 0x1;
+ 			rsrc_data->pack_fmt = 0x1;
+ 			rsrc_data->width = rsrc_data->width * 2;
+ 			rsrc_data->stride = rsrc_data->width;
+ 			break;
+ 		case CAM_FORMAT_PLAIN16_10:
+ 		case CAM_FORMAT_PLAIN16_12:
+ 		case CAM_FORMAT_PLAIN16_14:
+ 		case CAM_FORMAT_PLAIN16_16:
+ 		case CAM_FORMAT_PLAIN32_20:
+ 			rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH;
+ 			rsrc_data->height = 0;
+ 			rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
+ 			rsrc_data->pack_fmt = 0x0;
+ 			rsrc_data->en_cfg = 0x3;
+ 			break;
+ 		case CAM_FORMAT_PLAIN64:
+ 			rsrc_data->en_cfg = 0x1;
+ 			rsrc_data->pack_fmt = 0xA;
+ 			break;
+ 		default:
+ 			CAM_ERR(CAM_ISP, "Unsupported RDI format %d",
+ 				rsrc_data->format);
+ 			return -EINVAL;
+ 		}
+ 	} else if ((rsrc_data->index < 5) ||
+ 		(rsrc_data->index == 7) || (rsrc_data->index == 8) ||
+ 		(rsrc_data->index == 20) || (rsrc_data->index == 21)) {
+ 		/*
+ 		 * Write master 3, 4 - for Full OUT , 7-8  FD OUT,
+ 		 * WM 20-21 = FULL_DISP
+ 		 */
+ 		switch (rsrc_data->format) {
+ 		case CAM_FORMAT_UBWC_NV12_4R:
+ 			rsrc_data->en_ubwc = 1;
+ 			rsrc_data->width = ALIGNUP(rsrc_data->width, 64);
+ 			switch (plane) {
+ 			case PLANE_C:
+ 				rsrc_data->height /= 2;
+ 				break;
+ 			case PLANE_Y:
+ 				break;
+ 			default:
+ 				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+ 				return -EINVAL;
+ 			}
+ 			break;
+ 		case CAM_FORMAT_UBWC_NV12:
+ 			rsrc_data->en_ubwc = 1;
+ 			/* Fall through for NV12 */
+ 		case CAM_FORMAT_NV21:
+ 		case CAM_FORMAT_NV12:
+ 		case CAM_FORMAT_Y_ONLY:
+ 			switch (plane) {
+ 			case PLANE_C:
+ 				rsrc_data->height /= 2;
+ 				break;
+ 			case PLANE_Y:
+ 				break;
+ 			default:
+ 				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+ 				return -EINVAL;
+ 			}
+ 			break;
+ 		case CAM_FORMAT_UBWC_TP10:
+ 			rsrc_data->en_ubwc = 1;
+ 			rsrc_data->width =
+ 				ALIGNUP(rsrc_data->width, 48) * 4 / 3;
+ 			switch (plane) {
+ 			case PLANE_C:
+ 				rsrc_data->height /= 2;
+ 				break;
+ 			case PLANE_Y:
+ 				break;
+ 			default:
+ 				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+ 				return -EINVAL;
+ 			}
+ 			break;
+ 		case CAM_FORMAT_TP10:
+ 			rsrc_data->width =
+ 				ALIGNUP(rsrc_data->width, 3) * 4 / 3;
+ 			switch (plane) {
+ 			case PLANE_C:
+ 				rsrc_data->height /= 2;
+ 				break;
+ 			case PLANE_Y:
+ 				break;
+ 			default:
+ 				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+ 				return -EINVAL;
+ 			}
+ 			break;
+ 		case CAM_FORMAT_PLAIN16_10:
+ 			switch (plane) {
+ 			case PLANE_C:
+ 				rsrc_data->height /= 2;
+ 				break;
+ 			case PLANE_Y:
+ 				break;
+ 			default:
+ 				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+ 				return -EINVAL;
+ 			}
+ 			rsrc_data->width *= 2;
+ 			break;
+ 		default:
+ 			CAM_ERR(CAM_ISP, "Invalid format %d",
+ 				rsrc_data->format);
+ 			return -EINVAL;
+ 		}
+ 		rsrc_data->en_cfg = 0x1;
+ 	} else if (rsrc_data->index >= 11 && rsrc_data->index < 20) {
+ 		/* Write master 11 - 19 stats */
+ 		rsrc_data->width = 0;
+ 		rsrc_data->height = 0;
+ 		rsrc_data->stride = 1;
+ 		rsrc_data->en_cfg = 0x3;
+ 	} else if (rsrc_data->index == 10) {
+ 		/* Write master 10 - PDAF/2PD */
+ 		rsrc_data->width = 0;
+ 		rsrc_data->height = 0;
+ 		rsrc_data->stride = 1;
+ 		rsrc_data->en_cfg = 0x3;
+ 		if (vfe_out_res_id == CAM_VFE_BUS_VER2_VFE_OUT_PDAF)
+ 			/* LSB aligned */
+ 			rsrc_data->pack_fmt |= 0x10;
+ 	} else if (rsrc_data->index == 9) {
+ 		/* Write master 9 - Raw dump */
+ 		rsrc_data->width = rsrc_data->width * 2;
+ 		rsrc_data->stride = rsrc_data->width;
+ 		rsrc_data->en_cfg = 0x1;
+ 		/* LSB aligned */
+ 		rsrc_data->pack_fmt |= 0x10;
+ 	} else {
+ 		/* Write master 5-6 DS ports */
+ 		uint32_t align_width;
+
+ 		rsrc_data->width = rsrc_data->width * 4;
+ 		rsrc_data->height = rsrc_data->height / 2;
+ 		rsrc_data->en_cfg = 0x1;
+ 		CAM_DBG(CAM_ISP, "before width %d", rsrc_data->width);
+ 		align_width = ALIGNUP(rsrc_data->width, 16);
+ 		if (align_width != rsrc_data->width) {
+ 			CAM_WARN(CAM_ISP,
+ 				"Override width %u with expected %u",
+ 				rsrc_data->width, align_width);
+ 			rsrc_data->width = align_width;
+ 		}
+ 	}
+
+ 	*client_done_mask = (1 << wm_idx);
+ 	*wm_res = wm_res_local;
+
+ 	CAM_DBG(CAM_ISP, "WM %d: processed width %d, processed  height %d",
+ 		rsrc_data->index, rsrc_data->width, rsrc_data->height);
+ 	return 0;
+ }
+
+ /*
+  * Release a previously acquired write master: clear every per-acquire
+  * configuration field and return the resource to AVAILABLE so it can
+  * be acquired again. Always returns 0.
+  */
+ static int cam_vfe_bus_release_wm(void   *bus_priv,
+ 	struct cam_isp_resource_node     *wm_res)
+ {
+ 	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data =
+ 		wm_res->res_priv;
+
+ 	/* Reset all format/geometry/IRQ state set by acquire + config */
+ 	rsrc_data->irq_enabled = 0;
+ 	rsrc_data->offset = 0;
+ 	rsrc_data->width = 0;
+ 	rsrc_data->height = 0;
+ 	rsrc_data->stride = 0;
+ 	rsrc_data->format = 0;
+ 	rsrc_data->pack_fmt = 0;
+ 	rsrc_data->burst_len = 0;
+ 	rsrc_data->irq_subsample_period = 0;
+ 	rsrc_data->irq_subsample_pattern = 0;
+ 	rsrc_data->framedrop_period = 0;
+ 	rsrc_data->framedrop_pattern = 0;
+ 	rsrc_data->packer_cfg = 0;
+ 	rsrc_data->en_ubwc = 0;
+ 	/* UBWC tiling/meta configuration */
+ 	rsrc_data->tile_cfg = 0;
+ 	rsrc_data->h_init = 0;
+ 	rsrc_data->v_init = 0;
+ 	rsrc_data->ubwc_meta_stride = 0;
+ 	rsrc_data->ubwc_mode_cfg_0 = 0;
+ 	rsrc_data->ubwc_mode_cfg_1 = 0;
+ 	rsrc_data->ubwc_meta_offset = 0;
+ 	rsrc_data->init_cfg_done = false;
+ 	rsrc_data->hfr_cfg_done = false;
+ 	rsrc_data->en_cfg = 0;
+ 	rsrc_data->is_dual = 0;
+
+ 	wm_res->tasklet_info = NULL;
+ 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+ 	return 0;
+ }
+
+ /*
+  * Program and enable one write master.
+  *
+  * Sequence: burst limit, width/height, packer format, stride (RDI WMs
+  * only), optional buf-done IRQ subscription, optional UBWC enable
+  * (register layout differs by Titan HW version), then the final WM
+  * enable write. Returns 0 on success, -EFAULT when IRQ subscription
+  * fails, -EINVAL for an unrecognized HW version, or a CPAS error.
+  */
+ static int cam_vfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
+ {
+ 	int rc = 0, val = 0;
+ 	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data =
+ 		wm_res->res_priv;
+ 	struct cam_vfe_bus_ver2_common_data        *common_data =
+ 		rsrc_data->common_data;
+ 	uint32_t                   bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
+ 	uint32_t camera_hw_version;
+
+ 	/* 0xf = default burst length programmed for every WM */
+ 	cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
+
+ 	cam_io_w_mb(rsrc_data->width,
+ 		common_data->mem_base + rsrc_data->hw_regs->buffer_width_cfg);
+ 	cam_io_w(rsrc_data->height,
+ 		common_data->mem_base + rsrc_data->hw_regs->buffer_height_cfg);
+ 	cam_io_w(rsrc_data->pack_fmt,
+ 		common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
+
+ 	/* Configure stride for RDIs */
+ 	if (rsrc_data->index < 3)
+ 		cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
+ 			rsrc_data->hw_regs->stride));
+
+ 	/* Subscribe IRQ */
+ 	if (rsrc_data->irq_enabled) {
+ 		CAM_DBG(CAM_ISP, "Subscribe WM%d IRQ", rsrc_data->index);
+ 		/* buf-done bit for this WM lives in bus IRQ status reg 1 */
+ 		bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG1] =
+ 			(1 << rsrc_data->index);
+ 		wm_res->irq_handle = cam_irq_controller_subscribe_irq(
+ 			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+ 			bus_irq_reg_mask, wm_res,
+ 			wm_res->top_half_handler,
+ 			cam_ife_mgr_do_tasklet_buf_done,
+ 			wm_res->tasklet_info, &tasklet_bh_api);
+ 		if (wm_res->irq_handle < 0) {
+ 			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for WM %d",
+ 				rsrc_data->index);
+ 			return -EFAULT;
+ 		}
+ 	}
+
+ 	/* enable ubwc if needed*/
+ 	if (rsrc_data->en_ubwc) {
+ 		rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+ 		if (rc) {
+ 			CAM_ERR(CAM_ISP, "Failed to get HW version:%d rc:%d",
+ 				camera_hw_version, rc);
+ 			return rc;
+ 		}
+ 		/* Pre-175 parts use the v2 UBWC client register layout */
+ 		if ((camera_hw_version > CAM_CPAS_TITAN_NONE) &&
+ 			(camera_hw_version < CAM_CPAS_TITAN_175_V100)) {
+ 			struct cam_vfe_bus_ver2_reg_offset_ubwc_client
+ 				*ubwc_regs;
+
+ 			ubwc_regs =
+ 				(struct
+ 				cam_vfe_bus_ver2_reg_offset_ubwc_client *)
+ 				rsrc_data->hw_regs->ubwc_regs;
+ 			val = cam_io_r_mb(common_data->mem_base +
+ 				ubwc_regs->mode_cfg_0);
+ 			val |= 0x1;	/* set UBWC enable bit */
+ 			cam_io_w_mb(val, common_data->mem_base +
+ 				ubwc_regs->mode_cfg_0);
+ 		} else if ((camera_hw_version == CAM_CPAS_TITAN_175_V100) ||
+ 			(camera_hw_version == CAM_CPAS_TITAN_175_V101) ||
+ 			(camera_hw_version == CAM_CPAS_TITAN_175_V120)) {
+ 			/* Titan 175 parts use the UBWC v3 client layout */
+ 			struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client
+ 				*ubwc_regs;
+
+ 			ubwc_regs =
+ 				(struct
+ 				cam_vfe_bus_ver2_reg_offset_ubwc_3_client *)
+ 				rsrc_data->hw_regs->ubwc_regs;
+ 			val = cam_io_r_mb(common_data->mem_base +
+ 				ubwc_regs->mode_cfg_0);
+ 			val |= 0x1;	/* set UBWC enable bit */
+ 			cam_io_w_mb(val, common_data->mem_base +
+ 				ubwc_regs->mode_cfg_0);
+ 		} else {
+ 			CAM_ERR(CAM_ISP, "Invalid HW version: %d",
+ 				camera_hw_version);
+ 			return -EINVAL;
+ 		}
+ 	}
+
+ 	/* Enable WM */
+ 	cam_io_w_mb(rsrc_data->en_cfg, common_data->mem_base +
+ 		rsrc_data->hw_regs->cfg);
+
+ 	CAM_DBG(CAM_ISP, "WM res %d width = %d, height = %d", rsrc_data->index,
+ 		rsrc_data->width, rsrc_data->height);
+ 	CAM_DBG(CAM_ISP, "WM res %d pk_fmt = %d", rsrc_data->index,
+ 		rsrc_data->pack_fmt & PACKER_FMT_MAX);
+ 	CAM_DBG(CAM_ISP, "WM res %d stride = %d, burst len = %d",
+ 		rsrc_data->index, rsrc_data->stride, 0xf);
+ 	CAM_DBG(CAM_ISP, "enable WM res %d offset 0x%x val 0x%x",
+ 		rsrc_data->index, (uint32_t) rsrc_data->hw_regs->cfg,
+ 		rsrc_data->en_cfg);
+
+ 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+ 	return rc;
+ }
+
+ /*
+  * Stop a streaming write master: disable it in hardware, drop its
+  * buf-done IRQ subscription, and move the resource back to RESERVED.
+  * Remaining register state is left for the global reset to clear.
+  */
+ static int cam_vfe_bus_stop_wm(struct cam_isp_resource_node *wm_res)
+ {
+ 	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = wm_res->res_priv;
+ 	struct cam_vfe_bus_ver2_common_data *common = wm_data->common_data;
+ 	int rc = 0;
+
+ 	/* Disable the WM in HW first */
+ 	cam_io_w_mb(0x0, common->mem_base + wm_data->hw_regs->cfg);
+
+ 	CAM_DBG(CAM_ISP, "WM res %d irq_enabled %d",
+ 		wm_data->index, wm_data->irq_enabled);
+
+ 	/* Unsubscribe buf-done IRQ if it was enabled at start */
+ 	if (wm_data->irq_enabled)
+ 		rc = cam_irq_controller_unsubscribe_irq(
+ 			common->bus_irq_controller,
+ 			wm_res->irq_handle);
+
+ 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ 	wm_data->init_cfg_done = false;
+ 	wm_data->hfr_cfg_done = false;
+
+ 	return rc;
+ }
+
+ /*
+  * Top-half (IRQ context) handler for a WM buf-done interrupt.
+  * Grabs a free event payload, snapshots the IRQ status registers and
+  * timestamp into it, and hands it to the bottom half via
+  * th_payload->evt_payload_priv.
+  *
+  * Returns 0 on success, -ENODEV if the resource or a free payload is
+  * missing (the event is then dropped).
+  */
+ static int cam_vfe_bus_handle_wm_done_top_half(uint32_t evt_id,
+ 	struct cam_irq_th_payload *th_payload)
+ {
+ 	int32_t                                     rc;
+ 	int                                         i;
+ 	struct cam_isp_resource_node               *wm_res = NULL;
+ 	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data = NULL;
+ 	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
+
+ 	wm_res = th_payload->handler_priv;
+ 	if (!wm_res) {
+ 		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error: No resource");
+ 		return -ENODEV;
+ 	}
+
+ 	rsrc_data = wm_res->res_priv;
+
+ 	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+ 	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+
+ 	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
+ 	if (rc) {
+ 		/* Pool exhausted: log the raw status and drop the event */
+ 		CAM_ERR_RATE_LIMIT(CAM_ISP,
+ 			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
+ 			th_payload->evt_status_arr[0],
+ 			th_payload->evt_status_arr[1],
+ 			th_payload->evt_status_arr[2]);
+
+ 		return rc;
+ 	}
+
+ 	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+ 	evt_payload->ctx = rsrc_data->ctx;
+ 	evt_payload->core_index = rsrc_data->common_data->core_index;
+ 	evt_payload->evt_id  = evt_id;
+
+ 	/* Snapshot all IRQ status registers for the bottom half */
+ 	for (i = 0; i < th_payload->num_registers; i++)
+ 		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+ 	th_payload->evt_payload_priv = evt_payload;
+
+ 	CAM_DBG(CAM_ISP, "Exit");
+ 	return rc;
+ }
+
+ /*
+  * Bottom-half handler for a WM buf-done event. Consumes this WM's bit
+  * from the snapshotted STATUS1 register; the payload is recycled only
+  * once its bit was the one handled (other WMs sharing the payload
+  * clear their own bits first — see cam_vfe_bus_put_evt_payload, which
+  * keeps the payload while any status bit remains set).
+  *
+  * Returns CAM_VFE_IRQ_STATUS_SUCCESS when this WM's done bit was set,
+  * CAM_VFE_IRQ_STATUS_ERR otherwise.
+  */
+ static int cam_vfe_bus_handle_wm_done_bottom_half(void *wm_node,
+ 	void *evt_payload_priv)
+ {
+ 	int rc = CAM_VFE_IRQ_STATUS_ERR;
+ 	struct cam_isp_resource_node          *wm_res = wm_node;
+ 	struct cam_vfe_bus_irq_evt_payload    *evt_payload = evt_payload_priv;
+ 	struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data =
+ 		(wm_res == NULL) ? NULL : wm_res->res_priv;
+ 	uint32_t  *cam_ife_irq_regs;
+ 	uint32_t   status_reg;
+
+ 	if (!evt_payload || !rsrc_data)
+ 		return rc;
+
+ 	cam_ife_irq_regs = evt_payload->irq_reg_val;
+ 	status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
+
+ 	/* Claim (clear) this WM's done bit from the shared snapshot */
+ 	if (status_reg & BIT(rsrc_data->index)) {
+ 		cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1] &=
+ 			~BIT(rsrc_data->index);
+ 		rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+ 	}
+ 	CAM_DBG(CAM_ISP, "status_reg %x rc %d wm_idx %d",
+ 		status_reg, rc, rsrc_data->index);
+
+ 	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
+ 		cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
+ 			&evt_payload);
+
+ 	return rc;
+ }
+
+
+ /*
+  * Bottom-half handler for a bus violation error IRQ. Decodes the
+  * debug_status_0 bitmask into per-client violation messages, then
+  * recycles the event payload. Returns 0, or -EINVAL on bad arguments.
+  */
+ static int cam_vfe_bus_err_bottom_half(void *ctx_priv,
+ 	void *evt_payload_priv)
+ {
+ 	/* debug_status_0 bit -> human-readable violation source */
+ 	static const struct {
+ 		uint32_t    mask;
+ 		const char *msg;
+ 	} violation_map[] = {
+ 		{0x01,       "RDI 0 violation"},
+ 		{0x02,       "RDI 1 violation"},
+ 		{0x04,       "RDI 2 violation"},
+ 		{0x08,       "VID Y 1:1 UBWC violation"},
+ 		{0x010,      "VID C 1:1 UBWC violation"},
+ 		{0x020,      "VID YC 4:1 violation"},
+ 		{0x040,      "VID YC 16:1 violation"},
+ 		{0x080,      "FD Y violation"},
+ 		{0x0100,     "FD C violation"},
+ 		{0x0200,     "RAW DUMP violation"},
+ 		{0x0400,     "PDAF violation"},
+ 		{0x0800,     "STATs HDR BE violation"},
+ 		{0x01000,    "STATs HDR BHIST violation"},
+ 		{0x02000,    "STATs TINTLESS BG violation"},
+ 		{0x04000,    "STATs BF violation"},
+ 		{0x08000,    "STATs AWB BG UBWC violation"},
+ 		{0x010000,   "STATs BHIST violation"},
+ 		{0x020000,   "STATs RS violation"},
+ 		{0x040000,   "STATs CS violation"},
+ 		{0x080000,   "STATs IHIST violation"},
+ 		{0x0100000,  "DISP Y 1:1 UBWC violation"},
+ 		{0x0200000,  "DISP C 1:1 UBWC violation"},
+ 		{0x0400000,  "DISP YC 4:1 violation"},
+ 		{0x0800000,  "DISP YC 16:1 violation"},
+ 	};
+ 	struct cam_vfe_bus_irq_evt_payload *evt_payload;
+ 	struct cam_vfe_bus_ver2_common_data *common_data;
+ 	uint32_t val = 0;
+ 	size_t i;
+
+ 	if (!ctx_priv || !evt_payload_priv)
+ 		return -EINVAL;
+
+ 	evt_payload = evt_payload_priv;
+ 	common_data = evt_payload->ctx;
+
+ 	val = evt_payload->debug_status_0;
+ 	CAM_ERR(CAM_ISP, "Bus Violation: debug_status_0 = 0x%x", val);
+
+ 	for (i = 0; i < sizeof(violation_map) / sizeof(violation_map[0]); i++) {
+ 		if (val & violation_map[i].mask)
+ 			CAM_INFO(CAM_ISP, "%s", violation_map[i].msg);
+ 	}
+
+ 	cam_vfe_bus_put_evt_payload(common_data, &evt_payload);
+ 	return 0;
+ }
+
+ /*
+  * Allocate and initialize the private data and function pointers for
+  * one write-master resource node during bus init.
+  *
+  * @index:         fixed WM index; selects the per-client register block
+  * @ver2_bus_priv: owning bus instance
+  * @ver2_hw_info:  static HW description with per-client registers
+  * @wm_res:        the resource node to initialize
+  *
+  * Returns 0 on success, -ENOMEM on allocation failure.
+  */
+ static int cam_vfe_bus_init_wm_resource(uint32_t index,
+ 	struct cam_vfe_bus_ver2_priv    *ver2_bus_priv,
+ 	struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
+ 	struct cam_isp_resource_node    *wm_res)
+ {
+ 	struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data;
+
+ 	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_wm_resource_data),
+ 		GFP_KERNEL);
+ 	if (!rsrc_data) {
+ 		/* Fix: allocation failure was logged at debug level only */
+ 		CAM_ERR(CAM_ISP, "Failed to alloc for WM res priv");
+ 		return -ENOMEM;
+ 	}
+ 	wm_res->res_priv = rsrc_data;
+
+ 	rsrc_data->index = index;
+ 	rsrc_data->hw_regs = &ver2_hw_info->bus_client_reg[index];
+ 	rsrc_data->common_data = &ver2_bus_priv->common_data;
+
+ 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+ 	INIT_LIST_HEAD(&wm_res->list);
+
+ 	wm_res->start = cam_vfe_bus_start_wm;
+ 	wm_res->stop = cam_vfe_bus_stop_wm;
+ 	wm_res->top_half_handler = cam_vfe_bus_handle_wm_done_top_half;
+ 	wm_res->bottom_half_handler = cam_vfe_bus_handle_wm_done_bottom_half;
+ 	wm_res->hw_intf = ver2_bus_priv->common_data.hw_intf;
+
+ 	return 0;
+ }
+
+ /*
+  * Tear down one write-master resource node: mark it unavailable, clear
+  * its ops, and free its private data. Returns 0, or -ENOMEM when the
+  * node had no private data to free.
+  */
+ static int cam_vfe_bus_deinit_wm_resource(
+ 	struct cam_isp_resource_node    *wm_res)
+ {
+ 	struct cam_vfe_bus_ver2_wm_resource_data *wm_priv = wm_res->res_priv;
+
+ 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+ 	INIT_LIST_HEAD(&wm_res->list);
+
+ 	wm_res->start = NULL;
+ 	wm_res->stop = NULL;
+ 	wm_res->top_half_handler = NULL;
+ 	wm_res->bottom_half_handler = NULL;
+ 	wm_res->hw_intf = NULL;
+
+ 	wm_res->res_priv = NULL;
+ 	if (!wm_priv)
+ 		return -ENOMEM;
+
+ 	kfree(wm_priv);
+ 	return 0;
+ }
+
+ /*
+  * OR a write master's done bit into a composite group's mask so the
+  * group's buf-done fires only when all member WMs are done.
+  */
+ static void cam_vfe_bus_add_wm_to_comp_grp(
+ 	struct cam_isp_resource_node    *comp_grp,
+ 	uint32_t                         composite_mask)
+ {
+ 	struct cam_vfe_bus_ver2_comp_grp_data *grp_data;
+
+ 	grp_data = comp_grp->res_priv;
+ 	grp_data->composite_mask |= composite_mask;
+ }
+
+ /*
+  * Search the used composite-group list for a group already acquired
+  * with the same (local idx, unique id) pair, so multiple out ports of
+  * one context can share a group. Sets *comp_grp to the match or NULL.
+  */
+ static void cam_vfe_bus_match_comp_grp(
+ 	struct cam_vfe_bus_ver2_priv  *ver2_bus_priv,
+ 	struct cam_isp_resource_node **comp_grp,
+ 	uint32_t                       comp_grp_local_idx,
+ 	uint32_t                       unique_id)
+ {
+ 	struct cam_isp_resource_node          *node = NULL;
+ 	struct cam_vfe_bus_ver2_comp_grp_data *node_data;
+
+ 	*comp_grp = NULL;
+
+ 	list_for_each_entry(node, &ver2_bus_priv->used_comp_grp, list) {
+ 		node_data = node->res_priv;
+ 		if (node_data->comp_grp_local_idx != comp_grp_local_idx)
+ 			continue;
+ 		if (node_data->unique_id != unique_id)
+ 			continue;
+ 		/* Match found */
+ 		*comp_grp = node;
+ 		return;
+ 	}
+ }
+
+/*
+ * Acquire a composite group for a VFE out resource.
+ *
+ * If the out port names a valid comp group and a matching group (same
+ * bus comp-grp idx and unique_id) was already acquired, that group is
+ * shared and only its reference count is bumped.  Otherwise a free
+ * group is pulled from the dual or regular free list, initialized and
+ * moved onto the used list.
+ *
+ * Returns 0 on success, -ENODEV when no free group is available,
+ * -EBUSY when the matched group is already streaming (the composite
+ * mask cannot be changed at runtime), or the error from
+ * cam_vfe_bus_ver2_get_intra_client_mask() in the dual case.
+ */
+static int cam_vfe_bus_acquire_comp_grp(
+ struct cam_vfe_bus_ver2_priv *ver2_bus_priv,
+ struct cam_isp_out_port_info *out_port_info,
+ void *tasklet,
+ void *ctx,
+ uint32_t unique_id,
+ uint32_t is_dual,
+ uint32_t is_master,
+ enum cam_vfe_bus_ver2_vfe_core_id dual_slave_core,
+ struct cam_isp_resource_node **comp_grp)
+{
+ int rc = 0;
+ uint32_t bus_comp_grp_id;
+ struct cam_isp_resource_node *comp_grp_local = NULL;
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL;
+
+ bus_comp_grp_id = cam_vfe_bus_comp_grp_id_convert(
+ out_port_info->comp_grp_id);
+ /* Perform match only if there is valid comp grp request */
+ if (out_port_info->comp_grp_id != CAM_ISP_RES_COMP_GROUP_NONE) {
+ /* Check if matching comp_grp already acquired */
+ cam_vfe_bus_match_comp_grp(ver2_bus_priv, &comp_grp_local,
+ bus_comp_grp_id, unique_id);
+ }
+
+ if (!comp_grp_local) {
+ /* First find a free group */
+ if (is_dual) {
+ CAM_DBG(CAM_ISP, "Acquire dual comp group");
+ if (list_empty(&ver2_bus_priv->free_dual_comp_grp)) {
+ CAM_ERR(CAM_ISP, "No Free Composite Group");
+ return -ENODEV;
+ }
+ comp_grp_local = list_first_entry(
+ &ver2_bus_priv->free_dual_comp_grp,
+ struct cam_isp_resource_node, list);
+ rsrc_data = comp_grp_local->res_priv;
+ rc = cam_vfe_bus_ver2_get_intra_client_mask(
+ dual_slave_core,
+ comp_grp_local->hw_intf->hw_idx,
+ &rsrc_data->intra_client_mask);
+ if (rc)
+ return rc;
+ } else {
+ CAM_DBG(CAM_ISP, "Acquire comp group");
+ if (list_empty(&ver2_bus_priv->free_comp_grp)) {
+ CAM_ERR(CAM_ISP, "No Free Composite Group");
+ return -ENODEV;
+ }
+ comp_grp_local = list_first_entry(
+ &ver2_bus_priv->free_comp_grp,
+ struct cam_isp_resource_node, list);
+ rsrc_data = comp_grp_local->res_priv;
+ }
+
+ /* Move the freshly-reserved group from free to used list */
+ list_del(&comp_grp_local->list);
+ comp_grp_local->tasklet_info = tasklet;
+ comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+ rsrc_data->is_master = is_master;
+ rsrc_data->composite_mask = 0;
+ rsrc_data->unique_id = unique_id;
+ rsrc_data->comp_grp_local_idx = bus_comp_grp_id;
+
+ /* Only the dual-VFE master drives address sync (mode 0) */
+ if (is_master)
+ rsrc_data->addr_sync_mode = 0;
+ else
+ rsrc_data->addr_sync_mode = 1;
+
+ list_add_tail(&comp_grp_local->list,
+ &ver2_bus_priv->used_comp_grp);
+
+ } else {
+ rsrc_data = comp_grp_local->res_priv;
+ /* Do not support runtime change in composite mask */
+ if (comp_grp_local->res_state ==
+ CAM_ISP_RESOURCE_STATE_STREAMING) {
+ CAM_ERR(CAM_ISP, "Invalid State %d Comp Grp %u",
+ comp_grp_local->res_state,
+ rsrc_data->comp_grp_type);
+ return -EBUSY;
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "Comp Grp type %u", rsrc_data->comp_grp_type);
+
+ rsrc_data->ctx = ctx;
+ rsrc_data->acquire_dev_cnt++;
+ *comp_grp = comp_grp_local;
+
+ return rc;
+}
+
+/*
+ * Drop one reference on an acquired composite group.  When the last
+ * reference goes away the group is reset and returned to the proper
+ * free list (dual vs. regular, decided by its comp_grp_type).
+ *
+ * Returns 0 on success (including the benign already-released case),
+ * -EINVAL for a NULL group, -EBUSY if the group is still streaming,
+ * -ENODEV if the group is not on the used list.
+ */
+static int cam_vfe_bus_release_comp_grp(
+ struct cam_vfe_bus_ver2_priv *ver2_bus_priv,
+ struct cam_isp_resource_node *in_comp_grp)
+{
+ struct cam_isp_resource_node *comp_grp = NULL;
+ struct cam_vfe_bus_ver2_comp_grp_data *in_rsrc_data = NULL;
+ int match_found = 0;
+
+ if (!in_comp_grp) {
+ CAM_ERR(CAM_ISP, "Invalid Params Comp Grp %pK", in_comp_grp);
+ return -EINVAL;
+ }
+
+ if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_ERR(CAM_ISP, "Already released Comp Grp");
+ return 0;
+ }
+
+ if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
+ CAM_ERR(CAM_ISP, "Invalid State %d",
+ in_comp_grp->res_state);
+ return -EBUSY;
+ }
+
+ in_rsrc_data = in_comp_grp->res_priv;
+ CAM_DBG(CAM_ISP, "Comp Grp type %u", in_rsrc_data->comp_grp_type);
+
+ /* Sanity: the group must actually be on the used list */
+ list_for_each_entry(comp_grp, &ver2_bus_priv->used_comp_grp, list) {
+ if (comp_grp == in_comp_grp) {
+ match_found = 1;
+ break;
+ }
+ }
+
+ if (!match_found) {
+ CAM_ERR(CAM_ISP, "Could not find matching Comp Grp type %u",
+ in_rsrc_data->comp_grp_type);
+ return -ENODEV;
+ }
+
+ in_rsrc_data->acquire_dev_cnt--;
+ if (in_rsrc_data->acquire_dev_cnt == 0) {
+ /* Last user gone: reset state and return to a free list */
+ list_del(&comp_grp->list);
+
+ in_rsrc_data->unique_id = 0;
+ in_rsrc_data->comp_grp_local_idx = CAM_VFE_BUS_COMP_GROUP_NONE;
+ in_rsrc_data->composite_mask = 0;
+ in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
+ in_rsrc_data->addr_sync_mode = 0;
+
+ comp_grp->tasklet_info = NULL;
+ comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+ if (in_rsrc_data->comp_grp_type >=
+ CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+ in_rsrc_data->comp_grp_type <=
+ CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5)
+ list_add_tail(&comp_grp->list,
+ &ver2_bus_priv->free_dual_comp_grp);
+ else if (in_rsrc_data->comp_grp_type >=
+ CAM_VFE_BUS_VER2_COMP_GRP_0 &&
+ in_rsrc_data->comp_grp_type <=
+ CAM_VFE_BUS_VER2_COMP_GRP_5)
+ list_add_tail(&comp_grp->list,
+ &ver2_bus_priv->free_comp_grp);
+ }
+
+ return 0;
+}
+
+/*
+ * Program the composite group into HW and subscribe its buf-done IRQ.
+ *
+ * The composite mask is always (re)written; everything else is
+ * skipped when the group is already STREAMING.  For dual groups the
+ * master additionally programs intra-client enables and address-sync
+ * config, and only the master subscribes the IRQ; regular groups
+ * always subscribe.
+ *
+ * Returns 0 on success, -EFAULT if IRQ subscription fails.
+ */
+static int cam_vfe_bus_start_comp_grp(struct cam_isp_resource_node *comp_grp)
+{
+ int rc = 0;
+ uint32_t addr_sync_cfg;
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data =
+ comp_grp->res_priv;
+ struct cam_vfe_bus_ver2_common_data *common_data =
+ rsrc_data->common_data;
+ uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
+
+ CAM_DBG(CAM_ISP, "comp group id:%d streaming state:%d",
+ rsrc_data->comp_grp_type, comp_grp->res_state);
+
+ cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
+ rsrc_data->hw_regs->comp_mask);
+ /* Already streaming: mask refreshed above, nothing more to do */
+ if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+ return 0;
+
+ CAM_DBG(CAM_ISP, "composite_mask is 0x%x", rsrc_data->composite_mask);
+ CAM_DBG(CAM_ISP, "composite_mask addr 0x%x",
+ rsrc_data->hw_regs->comp_mask);
+
+ if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+ rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) {
+ int dual_comp_grp = (rsrc_data->comp_grp_type -
+ CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0);
+
+ if (rsrc_data->is_master) {
+ int intra_client_en = cam_io_r_mb(
+ common_data->mem_base +
+ common_data->common_reg->dual_master_comp_cfg);
+
+ /*
+ * 2 Bits per comp_grp. Hence left shift by
+ * comp_grp * 2
+ */
+ intra_client_en |=
+ (rsrc_data->intra_client_mask <<
+ (dual_comp_grp * 2));
+
+ cam_io_w_mb(intra_client_en, common_data->mem_base +
+ common_data->common_reg->dual_master_comp_cfg);
+
+ bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG2] =
+ (1 << dual_comp_grp);
+ }
+
+ CAM_DBG(CAM_ISP, "addr_sync_mask addr 0x%x",
+ rsrc_data->hw_regs->addr_sync_mask);
+ cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
+ rsrc_data->hw_regs->addr_sync_mask);
+
+ /* Read-modify-write: other groups' bits must be kept */
+ addr_sync_cfg = cam_io_r_mb(common_data->mem_base +
+ common_data->common_reg->addr_sync_cfg);
+ addr_sync_cfg |= (rsrc_data->addr_sync_mode << dual_comp_grp);
+ /*
+ * 2 Bits per dual_comp_grp. dual_comp_grp stats at bit number
+ * 8. Hence left shift cdual_comp_grp dual comp_grp * 2 and
+ * add 8
+ */
+ addr_sync_cfg |=
+ (rsrc_data->intra_client_mask <<
+ ((dual_comp_grp * 2) +
+ CAM_VFE_BUS_ADDR_SYNC_INTRA_CLIENT_SHIFT));
+ cam_io_w_mb(addr_sync_cfg, common_data->mem_base +
+ common_data->common_reg->addr_sync_cfg);
+
+ /* These clients are now synced: clear their no-sync bits */
+ common_data->addr_no_sync &= ~(rsrc_data->composite_mask);
+ cam_io_w_mb(common_data->addr_no_sync, common_data->mem_base +
+ common_data->common_reg->addr_sync_no_sync);
+ CAM_DBG(CAM_ISP, "addr_sync_cfg: 0x%x addr_no_sync_cfg: 0x%x",
+ addr_sync_cfg, common_data->addr_no_sync);
+ } else {
+ /* IRQ bits for COMP GRP start at 5. So add 5 to the shift */
+ bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG0] =
+ (1 << (rsrc_data->comp_grp_type + 5));
+ }
+
+ /*
+ * For Dual composite subscribe IRQ only for master
+ * For regular composite, subscribe IRQ always
+ */
+ CAM_DBG(CAM_ISP, "Subscribe COMP_GRP%d IRQ", rsrc_data->comp_grp_type);
+ if (((rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+ rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) &&
+ (rsrc_data->is_master)) ||
+ (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 &&
+ rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)) {
+ comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
+ common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+ bus_irq_reg_mask, comp_grp,
+ comp_grp->top_half_handler,
+ cam_ife_mgr_do_tasklet_buf_done,
+ comp_grp->tasklet_info, &tasklet_bh_api);
+ if (comp_grp->irq_handle < 0) {
+ CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
+ rsrc_data->comp_grp_type);
+ return -EFAULT;
+ }
+ }
+ comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+ return rc;
+}
+
+/*
+ * Stop a composite group: unsubscribe its buf-done IRQ (only if this
+ * group subscribed one in start — dual master or regular group) and
+ * move the node back to RESERVED.
+ *
+ * NOTE(review): irq_handle is not reset here after unsubscribe —
+ * presumably safe because start re-assigns it; confirm against the
+ * irq controller's handle semantics.
+ */
+static int cam_vfe_bus_stop_comp_grp(struct cam_isp_resource_node *comp_grp)
+{
+ int rc = 0;
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data =
+ comp_grp->res_priv;
+ struct cam_vfe_bus_ver2_common_data *common_data =
+ rsrc_data->common_data;
+
+ /* Unsubscribe IRQ */
+ if (((rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+ rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) &&
+ (rsrc_data->is_master)) ||
+ (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 &&
+ rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)) {
+ rc = cam_irq_controller_unsubscribe_irq(
+ common_data->bus_irq_controller,
+ comp_grp->irq_handle);
+ }
+ comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+ return rc;
+}
+
+/*
+ * Top half for composite-group buf done: grab a free event payload,
+ * stamp it (timestamp, ctx, core, evt id), copy the raw IRQ status
+ * registers into it and hand it to the bottom half via
+ * th_payload->evt_payload_priv.
+ *
+ * Returns 0 on success or the error from the payload pool when no
+ * free payload is available (the event is then dropped).
+ */
+static int cam_vfe_bus_handle_comp_done_top_half(uint32_t evt_id,
+ struct cam_irq_th_payload *th_payload)
+{
+ int32_t rc;
+ int i;
+ struct cam_isp_resource_node *comp_grp = NULL;
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL;
+ struct cam_vfe_bus_irq_evt_payload *evt_payload;
+
+ comp_grp = th_payload->handler_priv;
+ if (!comp_grp) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
+ return -ENODEV;
+ }
+
+ rsrc_data = comp_grp->res_priv;
+
+ CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+ CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
+
+ rc = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "No tasklet_cmd is free in queue");
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
+ th_payload->evt_status_arr[0],
+ th_payload->evt_status_arr[1],
+ th_payload->evt_status_arr[2]);
+
+ return rc;
+ }
+
+ cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+ evt_payload->ctx = rsrc_data->ctx;
+ evt_payload->core_index = rsrc_data->common_data->core_index;
+ evt_payload->evt_id = evt_id;
+
+ /* Snapshot all IRQ status registers for the bottom half */
+ for (i = 0; i < th_payload->num_registers; i++)
+ evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+ th_payload->evt_payload_priv = evt_payload;
+
+ CAM_DBG(CAM_ISP, "Exit");
+ return rc;
+}
+
+/*
+ * Bottom half for composite-group buf done.  Decodes the snapshotted
+ * IRQ status registers for this group's type:
+ *   - regular groups use STATUS0 (error bit 11, overwrite bit 12,
+ *     done bits starting at 5),
+ *   - dual groups use STATUS2 (error bit 6, overwrite bit 7, done
+ *     bits starting at 0).
+ * A done bit is only cleared from the snapshot once every acquirer of
+ * the shared group has observed it (irq_trigger_cnt vs
+ * acquire_dev_cnt), so each out resource sharing the group gets its
+ * own SUCCESS.
+ *
+ * Returns a CAM_VFE_IRQ_STATUS_* code; the payload is recycled to the
+ * free pool only on SUCCESS (error payloads are presumably consumed
+ * by the error path — confirm against the IRQ error handler).
+ */
+static int cam_vfe_bus_handle_comp_done_bottom_half(
+ void *handler_priv,
+ void *evt_payload_priv)
+{
+ int rc = CAM_VFE_IRQ_STATUS_ERR;
+ struct cam_isp_resource_node *comp_grp = handler_priv;
+ struct cam_vfe_bus_irq_evt_payload *evt_payload = evt_payload_priv;
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = comp_grp->res_priv;
+ uint32_t *cam_ife_irq_regs;
+ uint32_t status_reg;
+ uint32_t comp_err_reg;
+ uint32_t comp_grp_id;
+
+ CAM_DBG(CAM_ISP, "comp grp type %d", rsrc_data->comp_grp_type);
+
+ if (!evt_payload)
+ return rc;
+
+ cam_ife_irq_regs = evt_payload->irq_reg_val;
+
+ switch (rsrc_data->comp_grp_type) {
+ case CAM_VFE_BUS_VER2_COMP_GRP_0:
+ case CAM_VFE_BUS_VER2_COMP_GRP_1:
+ case CAM_VFE_BUS_VER2_COMP_GRP_2:
+ case CAM_VFE_BUS_VER2_COMP_GRP_3:
+ case CAM_VFE_BUS_VER2_COMP_GRP_4:
+ case CAM_VFE_BUS_VER2_COMP_GRP_5:
+ comp_grp_id = (rsrc_data->comp_grp_type -
+ CAM_VFE_BUS_VER2_COMP_GRP_0);
+
+ /* Check for Regular composite error */
+ status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
+
+ comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_COMP_ERR];
+ if ((status_reg & BIT(11)) &&
+ (comp_err_reg & rsrc_data->composite_mask)) {
+ /* Check for Regular composite error */
+ rc = CAM_VFE_IRQ_STATUS_ERR_COMP;
+ break;
+ }
+
+ comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_COMP_OWRT];
+ /* Check for Regular composite Overwrite */
+ if ((status_reg & BIT(12)) &&
+ (comp_err_reg & rsrc_data->composite_mask)) {
+ rc = CAM_VFE_IRQ_STATUS_COMP_OWRT;
+ break;
+ }
+
+ /* Regular Composite SUCCESS */
+ if (status_reg & BIT(comp_grp_id + 5)) {
+ rsrc_data->irq_trigger_cnt++;
+ if (rsrc_data->irq_trigger_cnt ==
+ rsrc_data->acquire_dev_cnt) {
+ cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0] &=
+ ~BIT(comp_grp_id + 5);
+ rsrc_data->irq_trigger_cnt = 0;
+ }
+ rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+ }
+
+ CAM_DBG(CAM_ISP, "status reg = 0x%x, bit index = %d rc %d",
+ status_reg, (comp_grp_id + 5), rc);
+ break;
+
+ case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0:
+ case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1:
+ case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2:
+ case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3:
+ case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4:
+ case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5:
+ comp_grp_id = (rsrc_data->comp_grp_type -
+ CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0);
+
+ /* Check for DUAL composite error */
+ status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2];
+
+ comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_DUAL_COMP_ERR];
+ if ((status_reg & BIT(6)) &&
+ (comp_err_reg & rsrc_data->composite_mask)) {
+ /* Check for DUAL composite error */
+ rc = CAM_VFE_IRQ_STATUS_ERR_COMP;
+ break;
+ }
+
+ /* Check for Dual composite Overwrite */
+ comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_DUAL_COMP_OWRT];
+ if ((status_reg & BIT(7)) &&
+ (comp_err_reg & rsrc_data->composite_mask)) {
+ rc = CAM_VFE_IRQ_STATUS_COMP_OWRT;
+ break;
+ }
+
+ /* DUAL Composite SUCCESS */
+ if (status_reg & BIT(comp_grp_id)) {
+ rsrc_data->irq_trigger_cnt++;
+ if (rsrc_data->irq_trigger_cnt ==
+ rsrc_data->acquire_dev_cnt) {
+ cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2] &=
+ ~BIT(comp_grp_id);
+ rsrc_data->irq_trigger_cnt = 0;
+ }
+ rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+ }
+
+ break;
+ default:
+ rc = CAM_VFE_IRQ_STATUS_ERR;
+ CAM_ERR(CAM_ISP, "Invalid comp_grp_type %u",
+ rsrc_data->comp_grp_type);
+ break;
+ }
+
+ if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
+ cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
+ &evt_payload);
+
+ return rc;
+}
+
+/*
+ * Initialize one composite-group resource node at probe time:
+ * allocate private data, bind register offsets for this group, place
+ * the node on the dual or regular free list (by its type) and install
+ * the start/stop/IRQ ops.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int cam_vfe_bus_init_comp_grp(uint32_t index,
+ struct cam_vfe_bus_ver2_priv *ver2_bus_priv,
+ struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
+ struct cam_isp_resource_node *comp_grp)
+{
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL;
+
+ rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_comp_grp_data),
+ GFP_KERNEL);
+ if (!rsrc_data)
+ return -ENOMEM;
+
+ comp_grp->res_priv = rsrc_data;
+
+ comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+ INIT_LIST_HEAD(&comp_grp->list);
+
+ /* The table index IS the comp grp type for this HW layout */
+ rsrc_data->comp_grp_type = index;
+ rsrc_data->common_data = &ver2_bus_priv->common_data;
+ rsrc_data->hw_regs = &ver2_hw_info->comp_grp_reg[index];
+ rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
+
+ if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+ rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5)
+ list_add_tail(&comp_grp->list,
+ &ver2_bus_priv->free_dual_comp_grp);
+ else if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0
+ && rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)
+ list_add_tail(&comp_grp->list, &ver2_bus_priv->free_comp_grp);
+
+ comp_grp->start = cam_vfe_bus_start_comp_grp;
+ comp_grp->stop = cam_vfe_bus_stop_comp_grp;
+ comp_grp->top_half_handler = cam_vfe_bus_handle_comp_done_top_half;
+ comp_grp->bottom_half_handler =
+ cam_vfe_bus_handle_comp_done_bottom_half;
+ comp_grp->hw_intf = ver2_bus_priv->common_data.hw_intf;
+
+ return 0;
+}
+
+/*
+ * Tear down a composite-group node: clear its ops, unlink it from
+ * whichever free/used list it is on, mark it unavailable and free the
+ * private data.  The node is fully cleared even if res_priv turns out
+ * to be NULL; in that case -ENODEV is returned.
+ */
+static int cam_vfe_bus_deinit_comp_grp(
+ struct cam_isp_resource_node *comp_grp)
+{
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data =
+ comp_grp->res_priv;
+
+ comp_grp->start = NULL;
+ comp_grp->stop = NULL;
+ comp_grp->top_half_handler = NULL;
+ comp_grp->bottom_half_handler = NULL;
+ comp_grp->hw_intf = NULL;
+
+ /* list_del_init keeps the node's list head self-consistent */
+ list_del_init(&comp_grp->list);
+ comp_grp->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+
+ comp_grp->res_priv = NULL;
+
+ if (!rsrc_data) {
+ CAM_ERR(CAM_ISP, "comp_grp_priv is NULL");
+ return -ENODEV;
+ }
+ kfree(rsrc_data);
+
+ return 0;
+}
+
+/*
+ * Process-cmd handler: report whether the given VFE out resource was
+ * acquired in secure mode.
+ *
+ * @priv:     cam_isp_resource_node of the VFE out resource
+ * @cmd_args: bool * that receives true when the resource is secure
+ * @arg_size: unused
+ *
+ * Returns 0 always.
+ */
+static int cam_vfe_bus_get_secure_mode(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	bool *mode = cmd_args;
+	struct cam_isp_resource_node *res =
+		(struct cam_isp_resource_node *) priv;
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data =
+		(struct cam_vfe_bus_ver2_vfe_out_data *)res->res_priv;
+
+	/* The comparison already yields a bool; no ternary needed */
+	*mode = (rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE);
+
+	return 0;
+}
+
+/*
+ * Acquire a VFE out resource (the per-port aggregate of WMs and an
+ * optional composite group).
+ *
+ * Flow: validate args, map the requested port to an out resource id,
+ * determine the number of WMs needed for the format, negotiate the
+ * secure mode against the bus-wide mode under bus_mutex, reserve a
+ * composite group when needed (multiple WMs, dual VFE, or an explicit
+ * comp grp id), then reserve each WM — rolling back already-reserved
+ * WMs and the comp group on failure.
+ *
+ * Returns 0 on success; -EINVAL / -ENODEV / -EBUSY on validation,
+ * lookup or state failures, or the error from comp grp / WM acquire.
+ */
+static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args,
+ uint32_t args_size)
+{
+ int rc = -ENODEV;
+ int i;
+ enum cam_vfe_bus_ver2_vfe_out_type vfe_out_res_id;
+ uint32_t format;
+ int num_wm;
+ uint32_t subscribe_irq;
+ uint32_t client_done_mask;
+ struct cam_vfe_bus_ver2_priv *ver2_bus_priv = bus_priv;
+ struct cam_vfe_acquire_args *acq_args = acquire_args;
+ struct cam_vfe_hw_vfe_out_acquire_args *out_acquire_args;
+ struct cam_isp_resource_node *rsrc_node = NULL;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+ uint32_t secure_caps = 0, mode;
+
+ if (!bus_priv || !acquire_args) {
+ CAM_ERR(CAM_ISP, "Invalid Param");
+ return -EINVAL;
+ }
+
+ out_acquire_args = &acq_args->vfe_out;
+ format = out_acquire_args->out_port_info->format;
+
+ CAM_DBG(CAM_ISP, "Acquiring resource type 0x%x",
+ out_acquire_args->out_port_info->res_type);
+
+ vfe_out_res_id = cam_vfe_bus_get_out_res_id(
+ out_acquire_args->out_port_info->res_type);
+ if (vfe_out_res_id == CAM_VFE_BUS_VER2_VFE_OUT_MAX)
+ return -ENODEV;
+
+ num_wm = cam_vfe_bus_get_num_wm(vfe_out_res_id, format);
+ if (num_wm < 1)
+ return -EINVAL;
+
+ rsrc_node = &ver2_bus_priv->vfe_out[vfe_out_res_id];
+ if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+ CAM_ERR(CAM_ISP, "Resource not available: Res_id %d state:%d",
+ vfe_out_res_id, rsrc_node->res_state);
+ return -EBUSY;
+ }
+
+ /*
+ * Secure-mode handshake: the first secure-capable out port fixes
+ * the bus-wide mode; later acquires must match it.
+ */
+ rsrc_data = rsrc_node->res_priv;
+ secure_caps = cam_vfe_bus_can_be_secure(
+ rsrc_data->out_type);
+ mode = out_acquire_args->out_port_info->secure_mode;
+ mutex_lock(&rsrc_data->common_data->bus_mutex);
+ if (secure_caps) {
+ if (!rsrc_data->common_data->num_sec_out) {
+ rsrc_data->secure_mode = mode;
+ rsrc_data->common_data->secure_mode = mode;
+ } else {
+ if (mode == rsrc_data->common_data->secure_mode) {
+ rsrc_data->secure_mode =
+ rsrc_data->common_data->secure_mode;
+ } else {
+ rc = -EINVAL;
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "Mismatch: Acquire mode[%d], drvr mode[%d]",
+ rsrc_data->common_data->secure_mode,
+ mode);
+ mutex_unlock(
+ &rsrc_data->common_data->bus_mutex);
+ return -EINVAL;
+ }
+ }
+ rsrc_data->common_data->num_sec_out++;
+ }
+ mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
+ ver2_bus_priv->tasklet_info = acq_args->tasklet;
+ rsrc_data->num_wm = num_wm;
+ rsrc_node->res_id = out_acquire_args->out_port_info->res_type;
+ rsrc_node->tasklet_info = acq_args->tasklet;
+ rsrc_node->cdm_ops = out_acquire_args->cdm_ops;
+ rsrc_data->cdm_util_ops = out_acquire_args->cdm_ops;
+
+ /* Reserve Composite Group */
+ if (num_wm > 1 || (out_acquire_args->is_dual) ||
+ (out_acquire_args->out_port_info->comp_grp_id >
+ CAM_ISP_RES_COMP_GROUP_NONE &&
+ out_acquire_args->out_port_info->comp_grp_id <
+ CAM_ISP_RES_COMP_GROUP_ID_MAX)) {
+
+ rc = cam_vfe_bus_acquire_comp_grp(ver2_bus_priv,
+ out_acquire_args->out_port_info,
+ acq_args->tasklet,
+ out_acquire_args->ctx,
+ out_acquire_args->unique_id,
+ out_acquire_args->is_dual,
+ out_acquire_args->is_master,
+ out_acquire_args->dual_slave_core,
+ &rsrc_data->comp_grp);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "VFE%d Comp_Grp acquire fail for Out %d rc=%d",
+ rsrc_data->common_data->core_index,
+ vfe_out_res_id, rc);
+ return rc;
+ }
+
+ /* Comp grp owns the done IRQ; WMs skip per-WM subscription */
+ subscribe_irq = 0;
+ } else {
+ subscribe_irq = 1;
+ }
+
+ /* Reserve WM */
+ for (i = 0; i < num_wm; i++) {
+ rc = cam_vfe_bus_acquire_wm(ver2_bus_priv,
+ out_acquire_args->out_port_info,
+ acq_args->tasklet,
+ out_acquire_args->ctx,
+ vfe_out_res_id,
+ i,
+ subscribe_irq,
+ &rsrc_data->wm_res[i],
+ &client_done_mask,
+ out_acquire_args->is_dual);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "VFE%d WM acquire failed for Out %d rc=%d",
+ rsrc_data->common_data->core_index,
+ vfe_out_res_id, rc);
+ goto release_wm;
+ }
+
+ if (rsrc_data->comp_grp)
+ cam_vfe_bus_add_wm_to_comp_grp(rsrc_data->comp_grp,
+ client_done_mask);
+ }
+
+ rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ out_acquire_args->rsrc_node = rsrc_node;
+
+ CAM_DBG(CAM_ISP, "Acquire successful");
+ return rc;
+
+release_wm:
+ /* Roll back WMs acquired so far, then the comp group */
+ for (i--; i >= 0; i--)
+ cam_vfe_bus_release_wm(ver2_bus_priv, rsrc_data->wm_res[i]);
+
+ cam_vfe_bus_release_comp_grp(ver2_bus_priv,
+ rsrc_data->comp_grp);
+
+ return rc;
+}
+
+/*
+ * Release a VFE out resource: release all its WMs and its composite
+ * group (if any), clear its ops context, unwind the secure-mode
+ * bookkeeping under bus_mutex, and return the node to AVAILABLE.
+ *
+ * A non-RESERVED state is logged but the release proceeds anyway;
+ * only a RESERVED node transitions back to AVAILABLE at the end.
+ */
+static int cam_vfe_bus_release_vfe_out(void *bus_priv, void *release_args,
+ uint32_t args_size)
+{
+ uint32_t i;
+ struct cam_isp_resource_node *vfe_out = NULL;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+ uint32_t secure_caps = 0;
+
+ if (!bus_priv || !release_args) {
+ CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK",
+ bus_priv, release_args);
+ return -EINVAL;
+ }
+
+ vfe_out = release_args;
+ rsrc_data = vfe_out->res_priv;
+
+ if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_ERR(CAM_ISP, "Invalid resource state:%d",
+ vfe_out->res_state);
+ }
+
+ for (i = 0; i < rsrc_data->num_wm; i++)
+ cam_vfe_bus_release_wm(bus_priv, rsrc_data->wm_res[i]);
+ rsrc_data->num_wm = 0;
+
+ if (rsrc_data->comp_grp)
+ cam_vfe_bus_release_comp_grp(bus_priv, rsrc_data->comp_grp);
+ rsrc_data->comp_grp = NULL;
+
+ vfe_out->tasklet_info = NULL;
+ vfe_out->cdm_ops = NULL;
+ rsrc_data->cdm_util_ops = NULL;
+
+ /* Mirror of the acquire-side secure handshake, under the same lock */
+ secure_caps = cam_vfe_bus_can_be_secure(rsrc_data->out_type);
+ mutex_lock(&rsrc_data->common_data->bus_mutex);
+ if (secure_caps) {
+ if (rsrc_data->secure_mode ==
+ rsrc_data->common_data->secure_mode) {
+ rsrc_data->common_data->num_sec_out--;
+ rsrc_data->secure_mode =
+ CAM_SECURE_MODE_NON_SECURE;
+ } else {
+ /*
+ * The validity of the mode is properly
+ * checked while acquiring the output port.
+ * not expected to reach here, unless there is
+ * some corruption.
+ */
+ CAM_ERR(CAM_ISP, "driver[%d],resource[%d] mismatch",
+ rsrc_data->common_data->secure_mode,
+ rsrc_data->secure_mode);
+ }
+
+ /* Last secure out gone: bus drops back to non-secure */
+ if (!rsrc_data->common_data->num_sec_out)
+ rsrc_data->common_data->secure_mode =
+ CAM_SECURE_MODE_NON_SECURE;
+ }
+ mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
+ if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED)
+ vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+ return 0;
+}
+
+/*
+ * Start a reserved VFE out resource: start each of its WMs, then its
+ * composite group (if any), and mark the node STREAMING.
+ *
+ * NOTE(review): rc is overwritten per call, so only the last WM's /
+ * comp grp's status is returned and earlier failures are not
+ * propagated — confirm whether that is intentional.
+ *
+ * Returns -EINVAL for NULL input, -EACCES when not RESERVED.
+ */
+static int cam_vfe_bus_start_vfe_out(
+ struct cam_isp_resource_node *vfe_out)
+{
+ int rc = 0, i;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+ struct cam_vfe_bus_ver2_common_data *common_data = NULL;
+
+ if (!vfe_out) {
+ CAM_ERR(CAM_ISP, "Invalid input");
+ return -EINVAL;
+ }
+
+ rsrc_data = vfe_out->res_priv;
+ common_data = rsrc_data->common_data;
+
+ CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type);
+
+ if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_ERR(CAM_ISP, "Invalid resource state:%d",
+ vfe_out->res_state);
+ return -EACCES;
+ }
+
+ for (i = 0; i < rsrc_data->num_wm; i++)
+ rc = cam_vfe_bus_start_wm(rsrc_data->wm_res[i]);
+
+ if (rsrc_data->comp_grp)
+ rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp);
+
+ vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+ return rc;
+}
+
+/*
+ * Stop a streaming VFE out resource: stop the composite group first
+ * (reverse of start order), then each WM, and drop the node back to
+ * RESERVED.  Stopping an AVAILABLE/RESERVED node is a no-op.
+ */
+static int cam_vfe_bus_stop_vfe_out(
+ struct cam_isp_resource_node *vfe_out)
+{
+ int rc = 0, i;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+
+ if (!vfe_out) {
+ CAM_ERR(CAM_ISP, "Invalid input");
+ return -EINVAL;
+ }
+
+ rsrc_data = vfe_out->res_priv;
+
+ if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
+ vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_DBG(CAM_ISP, "vfe_out res_state is %d", vfe_out->res_state);
+ return rc;
+ }
+
+ if (rsrc_data->comp_grp)
+ rc = cam_vfe_bus_stop_comp_grp(rsrc_data->comp_grp);
+
+ for (i = 0; i < rsrc_data->num_wm; i++)
+ rc = cam_vfe_bus_stop_wm(rsrc_data->wm_res[i]);
+
+ vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ return rc;
+}
+
+/*
+ * VFE out nodes never handle buf done at the top half themselves;
+ * done events are taken by the WM or composite-group handlers.
+ */
+static int cam_vfe_bus_handle_vfe_out_done_top_half(uint32_t evt_id,
+ struct cam_irq_th_payload *th_payload)
+{
+ return -EPERM;
+}
+
+/*
+ * Bottom half for a VFE out done event.
+ *
+ * If this resource has Composite Group then we only handle
+ * Composite done. We acquire Composite if number of WM > 1.
+ * So Else case is only one individual buf_done = WM[0].
+ */
+static int cam_vfe_bus_handle_vfe_out_done_bottom_half(
+	void *handler_priv,
+	void *evt_payload_priv)
+{
+	struct cam_isp_resource_node *vfe_out = handler_priv;
+	struct cam_vfe_bus_ver2_vfe_out_data *out_data = vfe_out->res_priv;
+	struct cam_isp_resource_node *done_res;
+	int rc;
+
+	done_res = out_data->comp_grp ? out_data->comp_grp
+		: out_data->wm_res[0];
+	rc = done_res->bottom_half_handler(done_res, evt_payload_priv);
+
+	CAM_DBG(CAM_ISP, "vfe_out %d rc %d", out_data->out_type, rc);
+
+	return rc;
+}
+
+/*
+ * Initialize one VFE out resource node at probe time from the HW
+ * info table: validate the out type, allocate private data, copy the
+ * static limits (max width/height) and install the node ops.
+ *
+ * Returns 0 on success, -EINVAL for a bad out type, -EFAULT if the
+ * node was already initialized, -ENOMEM on allocation failure.
+ */
+static int cam_vfe_bus_init_vfe_out_resource(uint32_t index,
+	struct cam_vfe_bus_ver2_priv    *ver2_bus_priv,
+	struct cam_vfe_bus_ver2_hw_info *ver2_hw_info)
+{
+	struct cam_isp_resource_node         *vfe_out = NULL;
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+	int32_t vfe_out_type =
+		ver2_hw_info->vfe_out_hw_info[index].vfe_out_type;
+
+	if (vfe_out_type < 0 ||
+		vfe_out_type >= CAM_VFE_BUS_VER2_VFE_OUT_MAX) {
+		CAM_ERR(CAM_ISP, "Init VFE Out failed, Invalid type=%d",
+			vfe_out_type);
+		return -EINVAL;
+	}
+
+	vfe_out = &ver2_bus_priv->vfe_out[vfe_out_type];
+	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE ||
+		vfe_out->res_priv) {
+		CAM_ERR(CAM_ISP,
+			"Error. Looks like same resource is init again");
+		return -EFAULT;
+	}
+
+	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_vfe_out_data),
+		GFP_KERNEL);
+	if (!rsrc_data)
+		return -ENOMEM;
+
+	vfe_out->res_priv = rsrc_data;
+
+	vfe_out->res_type = CAM_ISP_RESOURCE_VFE_OUT;
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&vfe_out->list);
+
+	rsrc_data->out_type =
+		ver2_hw_info->vfe_out_hw_info[index].vfe_out_type;
+	rsrc_data->common_data = &ver2_bus_priv->common_data;
+	rsrc_data->max_width =
+		ver2_hw_info->vfe_out_hw_info[index].max_width;
+	rsrc_data->max_height =
+		ver2_hw_info->vfe_out_hw_info[index].max_height;
+	rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE;
+
+	vfe_out->start = cam_vfe_bus_start_vfe_out;
+	vfe_out->stop = cam_vfe_bus_stop_vfe_out;
+	vfe_out->top_half_handler = cam_vfe_bus_handle_vfe_out_done_top_half;
+	vfe_out->bottom_half_handler =
+		cam_vfe_bus_handle_vfe_out_done_bottom_half;
+	vfe_out->process_cmd = cam_vfe_bus_process_cmd;
+	vfe_out->hw_intf = ver2_bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+/*
+ * Tear down a VFE out resource node: clear its ops, mark it
+ * unavailable and free its private data.  A node that is already
+ * UNAVAILABLE is not an error — some out types are simply never
+ * supported by the HW.
+ *
+ * Fixes: the original CAM_DBG format string "HW%d Res %d already
+ * deinitialized" carried two %d specifiers but NO arguments, which is
+ * undefined behavior for printk-style formatting; the res_id is now
+ * passed explicitly.  Also returns -ENODEV (not -ENOMEM) for a
+ * missing priv, matching cam_vfe_bus_deinit_comp_grp().
+ */
+static int cam_vfe_bus_deinit_vfe_out_resource(
+	struct cam_isp_resource_node    *vfe_out)
+{
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+
+	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_UNAVAILABLE) {
+		/*
+		 * This is not error. It can happen if the resource is
+		 * never supported in the HW.
+		 */
+		CAM_DBG(CAM_ISP, "Res %d already deinitialized",
+			vfe_out->res_id);
+		return 0;
+	}
+
+	vfe_out->start = NULL;
+	vfe_out->stop = NULL;
+	vfe_out->top_half_handler = NULL;
+	vfe_out->bottom_half_handler = NULL;
+	vfe_out->hw_intf = NULL;
+
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	INIT_LIST_HEAD(&vfe_out->list);
+	vfe_out->res_priv = NULL;
+
+	if (!rsrc_data)
+		return -ENODEV;
+	kfree(rsrc_data);
+
+	return 0;
+}
+
+/*
+ * Top-level bus IRQ entry: forward the event to the bus-local IRQ
+ * controller.  Returns 0 when the controller handled it, -EINVAL
+ * otherwise.
+ */
+static int cam_vfe_bus_ver2_handle_irq(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	struct cam_vfe_bus_ver2_priv *bus_priv = th_payload->handler_priv;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	if (cam_irq_controller_handle_irq(evt_id,
+		bus_priv->common_data.bus_irq_controller) == IRQ_HANDLED)
+		return 0;
+
+	return -EINVAL;
+}
+
+/*
+ * Top half for bus error IRQs: dump the raw status registers, mask
+ * further error IRQs (so a stuck error can't storm), then package the
+ * status snapshot plus debug_status_0 into an event payload for the
+ * bottom half.
+ *
+ * Returns 0 on success or the payload-pool error when exhausted.
+ */
+static int cam_vfe_bus_error_irq_top_half(uint32_t evt_id,
+ struct cam_irq_th_payload *th_payload)
+{
+ int i = 0, rc = 0;
+ struct cam_vfe_bus_ver2_priv *bus_priv =
+ th_payload->handler_priv;
+ struct cam_vfe_bus_irq_evt_payload *evt_payload;
+
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Bus Err IRQ");
+ for (i = 0; i < th_payload->num_registers; i++) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "vfe:%d: IRQ_Status%d: 0x%x",
+ bus_priv->common_data.core_index, i,
+ th_payload->evt_status_arr[i]);
+ }
+ /* Disable error IRQ until recovery re-enables it */
+ cam_irq_controller_disable_irq(bus_priv->common_data.bus_irq_controller,
+ bus_priv->error_irq_handle);
+
+ rc = cam_vfe_bus_get_evt_payload(&bus_priv->common_data, &evt_payload);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Cannot get payload");
+ return rc;
+ }
+
+ for (i = 0; i < th_payload->num_registers; i++)
+ evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+ evt_payload->core_index = bus_priv->common_data.core_index;
+ evt_payload->evt_id = evt_id;
+ evt_payload->ctx = &bus_priv->common_data;
+ evt_payload->debug_status_0 = cam_io_r_mb(
+ bus_priv->common_data.mem_base +
+ bus_priv->common_data.common_reg->debug_status_0);
+
+ th_payload->evt_payload_priv = evt_payload;
+
+ return rc;
+}
+
+/*
+ * Append a reg/val pair that programs the UBWC meta address register,
+ * choosing the Titan-170 (ubwc_client) vs Titan-175 (ubwc_3_client)
+ * register layout by CPAS HW version.
+ *
+ * NOTE(review): versions inside the accepted [170_V100, 175_V120]
+ * range but not listed in the switch silently program nothing (the
+ * default case) — confirm that is intended for intermediate versions.
+ * The function is void, so rc only short-circuits the flow.
+ */
+static void cam_vfe_bus_update_ubwc_meta_addr(
+ uint32_t *reg_val_pair,
+ uint32_t *j,
+ void *regs,
+ uint64_t image_buf)
+{
+ uint32_t camera_hw_version;
+ struct cam_vfe_bus_ver2_reg_offset_ubwc_client *ubwc_regs;
+ struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *ubwc_3_regs;
+ int rc = 0;
+
+ if (!regs || !reg_val_pair || !j) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Failed to get HW version rc: %d", rc);
+ goto end;
+ } else if ((camera_hw_version < CAM_CPAS_TITAN_170_V100) ||
+ (camera_hw_version > CAM_CPAS_TITAN_175_V120)) {
+ CAM_ERR(CAM_ISP, "Invalid HW version: %d",
+ camera_hw_version);
+ goto end;
+ }
+
+ switch (camera_hw_version) {
+ case CAM_CPAS_TITAN_170_V100:
+ case CAM_CPAS_TITAN_170_V110:
+ case CAM_CPAS_TITAN_170_V120:
+ ubwc_regs =
+ (struct cam_vfe_bus_ver2_reg_offset_ubwc_client *)regs;
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->meta_addr,
+ image_buf);
+ break;
+ case CAM_CPAS_TITAN_175_V100:
+ case CAM_CPAS_TITAN_175_V101:
+ case CAM_CPAS_TITAN_175_V120:
+ ubwc_3_regs =
+ (struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *)
+ regs;
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_3_regs->meta_addr,
+ image_buf);
+ break;
+ default:
+ break;
+ }
+end:
+ return;
+}
+
+/*
+ * Append the full UBWC v3 (Titan 175) register programming for one WM
+ * into the reg/val pair list: packer cfg, tile cfg, h/v init, meta
+ * stride/offset, both mode cfg words, and a format-specific bandwidth
+ * limit for TP10 / NV12_4R.
+ *
+ * NOTE(review): the dual and non-dual tile_cfg branches write the
+ * identical value and differ only in debug logging; parameter `i` is
+ * unused in this view — both presumably kept for symmetry with the
+ * legacy variant, confirm.
+ *
+ * Returns 0 on success, -EINVAL for NULL args.
+ */
+static int cam_vfe_bus_update_ubwc_3_regs(
+ struct cam_vfe_bus_ver2_wm_resource_data *wm_data,
+ uint32_t *reg_val_pair, uint32_t i, uint32_t *j)
+{
+ struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *ubwc_regs;
+ uint32_t ubwc_bw_limit = 0;
+ int rc = 0;
+
+ if (!wm_data || !reg_val_pair || !j) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ ubwc_regs = (struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *)
+ wm_data->hw_regs->ubwc_regs;
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ wm_data->hw_regs->packer_cfg, wm_data->packer_cfg);
+ CAM_DBG(CAM_ISP, "WM %d packer cfg 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ if (wm_data->is_dual) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->tile_cfg, wm_data->tile_cfg);
+ } else {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->tile_cfg, wm_data->tile_cfg);
+ CAM_DBG(CAM_ISP, "WM %d tile cfg 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+ }
+
+ /* Dual VFE: right half starts at the stripe offset, not h_init */
+ if (wm_data->is_dual) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->h_init, wm_data->offset);
+ } else {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->h_init, wm_data->h_init);
+ CAM_DBG(CAM_ISP, "WM %d h_init 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+ }
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->v_init, wm_data->v_init);
+ CAM_DBG(CAM_ISP, "WM %d v_init 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->meta_stride, wm_data->ubwc_meta_stride);
+ CAM_DBG(CAM_ISP, "WM %d meta stride 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->mode_cfg_0, wm_data->ubwc_mode_cfg_0);
+ CAM_DBG(CAM_ISP, "WM %d ubwc_mode_cfg_0 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->mode_cfg_1, wm_data->ubwc_mode_cfg_1);
+ CAM_DBG(CAM_ISP, "WM %d ubwc_mode_cfg_1 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->meta_offset, wm_data->ubwc_meta_offset);
+ CAM_DBG(CAM_ISP, "WM %d ubwc meta offset 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ /* Bit 0 enables the limiter; bits [x:1] carry the limit value */
+ switch (wm_data->format) {
+ case CAM_FORMAT_UBWC_TP10:
+ ubwc_bw_limit = (0x8 << 1) | BIT(0);
+ break;
+ case CAM_FORMAT_UBWC_NV12_4R:
+ ubwc_bw_limit = (0xB << 1) | BIT(0);
+ break;
+ default:
+ ubwc_bw_limit = 0;
+ break;
+ }
+
+ if (ubwc_bw_limit) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->bw_limit, ubwc_bw_limit);
+ CAM_DBG(CAM_ISP, "WM %d ubwc bw limit 0x%x",
+ wm_data->index, ubwc_bw_limit);
+ }
+
+end:
+ return rc;
+}
+
+/*
+ * cam_vfe_bus_update_ubwc_legacy_regs()
+ *
+ * @Brief: Append the legacy (pre-v3) UBWC client configuration for one
+ *         write master to the reg_val_pair array at index *j, advancing
+ *         *j. Unlike the v3 variant there is no mode_cfg_1, and the bw
+ *         limit is applied only on TITAN 170 V110.
+ *
+ * @Return: 0 on success, -EINVAL on invalid arguments.
+ */
+static int cam_vfe_bus_update_ubwc_legacy_regs(
+ struct cam_vfe_bus_ver2_wm_resource_data *wm_data,
+ uint32_t camera_hw_version, uint32_t *reg_val_pair,
+ uint32_t i, uint32_t *j)
+{
+ struct cam_vfe_bus_ver2_reg_offset_ubwc_client *ubwc_regs;
+ uint32_t ubwc_bw_limit = 0;
+ int rc = 0;
+
+ if (!wm_data || !reg_val_pair || !j) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* ubwc_regs is stored as void* in hw_regs; version-specific cast. */
+ ubwc_regs = (struct cam_vfe_bus_ver2_reg_offset_ubwc_client *)
+ wm_data->hw_regs->ubwc_regs;
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ wm_data->hw_regs->packer_cfg, wm_data->packer_cfg);
+ CAM_DBG(CAM_ISP, "WM %d packer cfg 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ /* NOTE(review): both branches program the identical tile_cfg value;
+ * only the debug log differs — candidate for collapsing.
+ */
+ if (wm_data->is_dual) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->tile_cfg, wm_data->tile_cfg);
+ } else {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->tile_cfg, wm_data->tile_cfg);
+ CAM_DBG(CAM_ISP, "WM %d tile cfg 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+ }
+
+ /* Dual VFE: the stripe offset (set by cam_vfe_bus_update_stripe_cfg)
+ * is programmed as h_init instead of the per-plane h_init.
+ */
+ if (wm_data->is_dual) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->h_init, wm_data->offset);
+ } else {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->h_init, wm_data->h_init);
+ CAM_DBG(CAM_ISP, "WM %d h_init 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+ }
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->v_init, wm_data->v_init);
+ CAM_DBG(CAM_ISP, "WM %d v_init 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->meta_stride, wm_data->ubwc_meta_stride);
+ CAM_DBG(CAM_ISP, "WM %d meta stride 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->mode_cfg_0, wm_data->ubwc_mode_cfg_0);
+ CAM_DBG(CAM_ISP, "WM %d ubwc_mode_cfg_0 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->meta_offset, wm_data->ubwc_meta_offset);
+ CAM_DBG(CAM_ISP, "WM %d ubwc meta offset 0x%x",
+ wm_data->index, reg_val_pair[*j-1]);
+
+ /* bw limit is only programmed on TITAN 170 V110; note the encoding
+ * differs from the v3 variant (no << 1 shift) — confirm against spec.
+ */
+ if (camera_hw_version == CAM_CPAS_TITAN_170_V110) {
+ switch (wm_data->format) {
+ case CAM_FORMAT_UBWC_TP10:
+ ubwc_bw_limit = 0x8 | BIT(0);
+ break;
+ case CAM_FORMAT_UBWC_NV12_4R:
+ ubwc_bw_limit = 0xB | BIT(0);
+ break;
+ default:
+ ubwc_bw_limit = 0;
+ break;
+ }
+ }
+
+ if (ubwc_bw_limit) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+ ubwc_regs->bw_limit, ubwc_bw_limit);
+ CAM_DBG(CAM_ISP, "WM %d ubwc bw limit 0x%x",
+ wm_data->index, ubwc_bw_limit);
+ }
+
+end:
+ return rc;
+}
+
+/*
+ * cam_vfe_bus_update_ubwc_regs()
+ *
+ * @Brief: Dispatch UBWC register programming to the legacy or v3 helper
+ *         based on the CPAS camera HW version.
+ *
+ * @Return: 0 on success, negative errno on failure.
+ */
+static int cam_vfe_bus_update_ubwc_regs(
+ struct cam_vfe_bus_ver2_wm_resource_data *wm_data,
+ uint32_t *reg_val_pair, uint32_t i, uint32_t *j)
+{
+ uint32_t camera_hw_version;
+ int rc = 0;
+
+ if (!wm_data || !reg_val_pair || !j) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Failed to get HW version rc: %d", rc);
+ goto end;
+ } else if ((camera_hw_version <= CAM_CPAS_TITAN_NONE) ||
+ (camera_hw_version >= CAM_CPAS_TITAN_MAX)) {
+ CAM_ERR(CAM_ISP, "Invalid HW version: %d",
+ camera_hw_version);
+ rc = -EINVAL;
+ goto end;
+ }
+ /* NOTE(review): an in-range but unlisted HW version hits the default
+ * case and returns 0 without programming any UBWC registers.
+ */
+ switch (camera_hw_version) {
+ case CAM_CPAS_TITAN_170_V100:
+ case CAM_CPAS_TITAN_170_V110:
+ case CAM_CPAS_TITAN_170_V120:
+ rc = cam_vfe_bus_update_ubwc_legacy_regs(
+ wm_data, camera_hw_version, reg_val_pair, i, j);
+ break;
+ case CAM_CPAS_TITAN_175_V100:
+ case CAM_CPAS_TITAN_175_V101:
+ case CAM_CPAS_TITAN_175_V120:
+ rc = cam_vfe_bus_update_ubwc_3_regs(
+ wm_data, reg_val_pair, i, j);
+ break;
+ default:
+ break;
+ }
+
+ if (rc)
+ CAM_ERR(CAM_ISP, "Failed to update ubwc regs rc:%d", rc);
+
+end:
+ return rc;
+}
+
+/*
+ * cam_vfe_bus_update_wm()
+ *
+ * @Brief: Build the CDM register-random command that programs every
+ *         write master of a VFE output port for a new buffer: width,
+ *         stride, UBWC config/meta address (if enabled), image
+ *         address(es), frame increment and WM enable.
+ *
+ * @priv:      cam_vfe_bus_ver2_priv pointer
+ * @cmd_args:  cam_isp_hw_get_cmd_update with wm_update buffer info
+ *
+ * @Return: 0 on success, negative errno on failure.
+ */
+static int cam_vfe_bus_update_wm(void *priv, void *cmd_args,
+ uint32_t arg_size)
+{
+ struct cam_vfe_bus_ver2_priv *bus_priv;
+ struct cam_isp_hw_get_cmd_update *update_buf;
+ struct cam_buf_io_cfg *io_cfg;
+ struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL;
+ struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
+ struct cam_vfe_bus_ver2_reg_offset_ubwc_client *ubwc_client = NULL;
+ uint32_t *reg_val_pair;
+ uint32_t i, j, k, size = 0;
+ uint32_t frame_inc = 0, val;
+ uint32_t loop_size = 0;
+
+ bus_priv = (struct cam_vfe_bus_ver2_priv *) priv;
+ update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+ vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
+ update_buf->res->res_priv;
+
+ if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
+ CAM_ERR(CAM_ISP, "Failed! Invalid data");
+ return -EINVAL;
+ }
+
+ /* One buffer per write master is required. */
+ if (update_buf->wm_update->num_buf != vfe_out_data->num_wm) {
+ CAM_ERR(CAM_ISP,
+ "Failed! Invalid number buffers:%d required:%d",
+ update_buf->wm_update->num_buf, vfe_out_data->num_wm);
+ return -EINVAL;
+ }
+
+ reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
+ io_cfg = update_buf->wm_update->io_cfg;
+
+ for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+ /* Bail before this WM could overflow the pair array. */
+ if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+ CAM_ERR(CAM_ISP,
+ "reg_val_pair %d exceeds the array limit %zu",
+ j, MAX_REG_VAL_PAIR_SIZE);
+ return -ENOMEM;
+ }
+
+ wm_data = vfe_out_data->wm_res[i]->res_priv;
+ /* NOTE(review): ubwc_client is assigned but never used below. */
+ ubwc_client = wm_data->hw_regs->ubwc_regs;
+ /* update width register */
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->buffer_width_cfg,
+ wm_data->width);
+ CAM_DBG(CAM_ISP, "WM %d image width 0x%x",
+ wm_data->index, reg_val_pair[j-1]);
+
+ /* For initial configuration program all bus registers */
+ val = io_cfg->planes[i].plane_stride;
+ CAM_DBG(CAM_ISP, "before stride %d", val);
+ val = ALIGNUP(val, 16);
+ if (val != io_cfg->planes[i].plane_stride &&
+ val != wm_data->stride)
+ CAM_WARN(CAM_ISP,
+ "Warning stride %u expected %u",
+ io_cfg->planes[i].plane_stride,
+ val);
+
+ /* NOTE(review): the raw plane_stride is written to HW while the
+ * 16-byte-aligned value is cached in wm_data->stride — confirm
+ * this asymmetry is intended. Stride is only programmed for
+ * non-RDI WMs (index >= 3).
+ */
+ if ((wm_data->stride != val ||
+ !wm_data->init_cfg_done) && (wm_data->index >= 3)) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->stride,
+ io_cfg->planes[i].plane_stride);
+ wm_data->stride = val;
+ CAM_DBG(CAM_ISP, "WM %d image stride 0x%x",
+ wm_data->index, reg_val_pair[j-1]);
+ }
+
+ if (wm_data->en_ubwc) {
+ if (!wm_data->hw_regs->ubwc_regs) {
+ CAM_ERR(CAM_ISP,
+ "No UBWC register to configure.");
+ return -EINVAL;
+ }
+ /* Re-program UBWC regs only when config changed. */
+ if (wm_data->ubwc_updated) {
+ wm_data->ubwc_updated = false;
+ cam_vfe_bus_update_ubwc_regs(
+ wm_data, reg_val_pair, i, &j);
+ }
+
+ /* UBWC meta address */
+ cam_vfe_bus_update_ubwc_meta_addr(
+ reg_val_pair, &j,
+ wm_data->hw_regs->ubwc_regs,
+ update_buf->wm_update->image_buf[i]);
+ CAM_DBG(CAM_ISP, "WM %d ubwc meta addr 0x%llx",
+ wm_data->index,
+ update_buf->wm_update->image_buf[i]);
+ }
+
+ /* Frame increment: UBWC frames are 4K-aligned plus meta plane. */
+ if (wm_data->en_ubwc) {
+ frame_inc = ALIGNUP(io_cfg->planes[i].plane_stride *
+ io_cfg->planes[i].slice_height, 4096);
+ frame_inc += io_cfg->planes[i].meta_size;
+ CAM_DBG(CAM_ISP,
+ "WM %d frm %d: ht: %d stride %d meta: %d",
+ wm_data->index, frame_inc,
+ io_cfg->planes[i].slice_height,
+ io_cfg->planes[i].plane_stride,
+ io_cfg->planes[i].meta_size);
+ } else {
+ frame_inc = io_cfg->planes[i].plane_stride *
+ io_cfg->planes[i].slice_height;
+ }
+
+ /* WMs 0-2 are RDI clients (see cam_vfe_bus_update_hfr):
+ * program one image address per irq subsample period.
+ */
+ if (wm_data->index < 3)
+ loop_size = wm_data->irq_subsample_period + 1;
+ else
+ loop_size = 1;
+
+ /* WM Image address */
+ for (k = 0; k < loop_size; k++) {
+ if (wm_data->en_ubwc)
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->image_addr,
+ update_buf->wm_update->image_buf[i] +
+ io_cfg->planes[i].meta_size +
+ k * frame_inc);
+ else
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->image_addr,
+ update_buf->wm_update->image_buf[i] +
+ wm_data->offset + k * frame_inc);
+ CAM_DBG(CAM_ISP, "WM %d image address 0x%x",
+ wm_data->index, reg_val_pair[j-1]);
+ }
+
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->frame_inc, frame_inc);
+ CAM_DBG(CAM_ISP, "WM %d frame_inc %d",
+ wm_data->index, reg_val_pair[j-1]);
+
+
+ /* enable the WM */
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->cfg,
+ wm_data->en_cfg);
+
+ /* set initial configuration done */
+ if (!wm_data->init_cfg_done)
+ wm_data->init_cfg_done = true;
+ }
+
+ size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
+
+ /* cdm util returns dwords, need to convert to bytes */
+ if ((size * 4) > update_buf->cmd.size) {
+ CAM_ERR(CAM_ISP,
+ "Failed! Buf size:%d insufficient, expected size:%d",
+ update_buf->cmd.size, size);
+ return -ENOMEM;
+ }
+
+ vfe_out_data->cdm_util_ops->cdm_write_regrandom(
+ update_buf->cmd.cmd_buf_addr, j/2, reg_val_pair);
+
+ /* cdm util returns dwords, need to convert to bytes */
+ update_buf->cmd.used_bytes = size * 4;
+
+ return 0;
+}
+
+/*
+ * cam_vfe_bus_update_hfr()
+ *
+ * @Brief: Build the CDM command that programs per-WM HFR settings
+ *         (framedrop pattern/period, irq subsample pattern/period).
+ *         Registers are only re-programmed when the cached value
+ *         differs or the first configuration has not been done yet.
+ *
+ * @Return: 0 on success, negative errno on failure.
+ */
+static int cam_vfe_bus_update_hfr(void *priv, void *cmd_args,
+ uint32_t arg_size)
+{
+ struct cam_vfe_bus_ver2_priv *bus_priv;
+ struct cam_isp_hw_get_cmd_update *update_hfr;
+ struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL;
+ struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
+ struct cam_isp_port_hfr_config *hfr_cfg = NULL;
+ uint32_t *reg_val_pair;
+ uint32_t i, j, size = 0;
+
+ bus_priv = (struct cam_vfe_bus_ver2_priv *) priv;
+ update_hfr = (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+ vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
+ update_hfr->res->res_priv;
+
+ if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
+ CAM_ERR(CAM_ISP, "Failed! Invalid data");
+ return -EINVAL;
+ }
+
+ reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
+ hfr_cfg = update_hfr->hfr_update;
+
+ for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+ /* Bail before this WM could overflow the pair array. */
+ if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+ CAM_ERR(CAM_ISP,
+ "reg_val_pair %d exceeds the array limit %zu",
+ j, MAX_REG_VAL_PAIR_SIZE);
+ return -ENOMEM;
+ }
+
+ wm_data = vfe_out_data->wm_res[i]->res_priv;
+
+ /* RDI WMs (index 0-2) support a subsample period of at most 3. */
+ if (wm_data->index <= 2 && hfr_cfg->subsample_period > 3) {
+ CAM_ERR(CAM_ISP,
+ "RDI doesn't support irq subsample period %d",
+ hfr_cfg->subsample_period);
+ return -EINVAL;
+ }
+
+ if ((wm_data->framedrop_pattern !=
+ hfr_cfg->framedrop_pattern) ||
+ !wm_data->hfr_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->framedrop_pattern,
+ hfr_cfg->framedrop_pattern);
+ wm_data->framedrop_pattern = hfr_cfg->framedrop_pattern;
+ CAM_DBG(CAM_ISP, "WM %d framedrop pattern 0x%x",
+ wm_data->index, wm_data->framedrop_pattern);
+ }
+
+ if (wm_data->framedrop_period != hfr_cfg->framedrop_period ||
+ !wm_data->hfr_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->framedrop_period,
+ hfr_cfg->framedrop_period);
+ wm_data->framedrop_period = hfr_cfg->framedrop_period;
+ CAM_DBG(CAM_ISP, "WM %d framedrop period 0x%x",
+ wm_data->index, wm_data->framedrop_period);
+ }
+
+ if (wm_data->irq_subsample_period != hfr_cfg->subsample_period
+ || !wm_data->hfr_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->irq_subsample_period,
+ hfr_cfg->subsample_period);
+ wm_data->irq_subsample_period =
+ hfr_cfg->subsample_period;
+ CAM_DBG(CAM_ISP, "WM %d irq subsample period 0x%x",
+ wm_data->index, wm_data->irq_subsample_period);
+ }
+
+ if (wm_data->irq_subsample_pattern != hfr_cfg->subsample_pattern
+ || !wm_data->hfr_cfg_done) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->irq_subsample_pattern,
+ hfr_cfg->subsample_pattern);
+ wm_data->irq_subsample_pattern =
+ hfr_cfg->subsample_pattern;
+ CAM_DBG(CAM_ISP, "WM %d irq subsample pattern 0x%x",
+ wm_data->index, wm_data->irq_subsample_pattern);
+ }
+
+ /* set initial configuration done */
+ if (!wm_data->hfr_cfg_done)
+ wm_data->hfr_cfg_done = true;
+ }
+
+ size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
+
+ /* cdm util returns dwords, need to convert to bytes */
+ if ((size * 4) > update_hfr->cmd.size) {
+ CAM_ERR(CAM_ISP,
+ "Failed! Buf size:%d insufficient, expected size:%d",
+ update_hfr->cmd.size, size);
+ return -ENOMEM;
+ }
+
+ vfe_out_data->cdm_util_ops->cdm_write_regrandom(
+ update_hfr->cmd.cmd_buf_addr, j/2, reg_val_pair);
+
+ /* cdm util returns dwords, need to convert to bytes */
+ update_hfr->cmd.used_bytes = size * 4;
+
+ return 0;
+}
+
+/*
+ * cam_vfe_bus_update_ubwc_config()
+ *
+ * @Brief: Cache the per-plane UBWC configuration from user space into
+ *         each WM's resource data. No registers are written here; any
+ *         changed field sets wm_data->ubwc_updated so that the next
+ *         cam_vfe_bus_update_wm() call programs the HW.
+ *
+ * @Return: 0 on success, -EINVAL on invalid args or UBWC-disabled WM.
+ */
+static int cam_vfe_bus_update_ubwc_config(void *cmd_args)
+{
+ struct cam_isp_hw_get_cmd_update *update_ubwc;
+ struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL;
+ struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
+ struct cam_ubwc_plane_cfg_v1 *ubwc_plane_cfg = NULL;
+ uint32_t i;
+ int rc = 0;
+
+ if (!cmd_args) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ update_ubwc = (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+ vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
+ update_ubwc->res->res_priv;
+
+ if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
+ CAM_ERR(CAM_ISP, "Invalid data");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ ubwc_plane_cfg = update_ubwc->ubwc_update;
+
+ /* One plane config per WM; advance the plane pointer per iteration. */
+ for (i = 0; i < vfe_out_data->num_wm; i++) {
+
+ wm_data = vfe_out_data->wm_res[i]->res_priv;
+ if (i > 0)
+ ubwc_plane_cfg++;
+
+ if (!wm_data->hw_regs->ubwc_regs) {
+ CAM_ERR(CAM_ISP,
+ "No UBWC register to configure.");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!wm_data->en_ubwc) {
+ CAM_ERR(CAM_ISP, "UBWC Disabled");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (wm_data->packer_cfg !=
+ ubwc_plane_cfg->packer_config ||
+ !wm_data->init_cfg_done) {
+ wm_data->packer_cfg = ubwc_plane_cfg->packer_config;
+ wm_data->ubwc_updated = true;
+ }
+
+ /* tile_cfg and h_init come from stripe config in dual mode,
+ * so only cache them here for single-VFE WMs.
+ */
+ if ((!wm_data->is_dual) && ((wm_data->tile_cfg !=
+ ubwc_plane_cfg->tile_config)
+ || !wm_data->init_cfg_done)) {
+ wm_data->tile_cfg = ubwc_plane_cfg->tile_config;
+ wm_data->ubwc_updated = true;
+ }
+
+ if ((!wm_data->is_dual) && ((wm_data->h_init !=
+ ubwc_plane_cfg->h_init) ||
+ !wm_data->init_cfg_done)) {
+ wm_data->h_init = ubwc_plane_cfg->h_init;
+ wm_data->ubwc_updated = true;
+ }
+
+ if (wm_data->v_init != ubwc_plane_cfg->v_init ||
+ !wm_data->init_cfg_done) {
+ wm_data->v_init = ubwc_plane_cfg->v_init;
+ wm_data->ubwc_updated = true;
+ }
+
+ if (wm_data->ubwc_meta_stride !=
+ ubwc_plane_cfg->meta_stride ||
+ !wm_data->init_cfg_done) {
+ wm_data->ubwc_meta_stride = ubwc_plane_cfg->meta_stride;
+ wm_data->ubwc_updated = true;
+ }
+
+ if (wm_data->ubwc_mode_cfg_0 !=
+ ubwc_plane_cfg->mode_config_0 ||
+ !wm_data->init_cfg_done) {
+ wm_data->ubwc_mode_cfg_0 =
+ ubwc_plane_cfg->mode_config_0;
+ wm_data->ubwc_updated = true;
+ }
+
+ if (wm_data->ubwc_mode_cfg_1 !=
+ ubwc_plane_cfg->mode_config_1 ||
+ !wm_data->init_cfg_done) {
+ wm_data->ubwc_mode_cfg_1 =
+ ubwc_plane_cfg->mode_config_1;
+ wm_data->ubwc_updated = true;
+ }
+
+ if (wm_data->ubwc_meta_offset !=
+ ubwc_plane_cfg->meta_offset ||
+ !wm_data->init_cfg_done) {
+ wm_data->ubwc_meta_offset = ubwc_plane_cfg->meta_offset;
+ wm_data->ubwc_updated = true;
+ }
+ }
+
+end:
+ return rc;
+}
+
+
+/*
+ * cam_vfe_bus_update_stripe_cfg()
+ *
+ * @Brief: For dual-VFE operation, cache the per-WM stripe width,
+ *         offset and tile config for this VFE's split of the output
+ *         port. Out-of-range resource ids are silently ignored
+ *         (returns 0).
+ *
+ * @Return: 0 on success, -EINVAL on invalid data.
+ */
+static int cam_vfe_bus_update_stripe_cfg(void *priv, void *cmd_args,
+ uint32_t arg_size)
+{
+ struct cam_vfe_bus_ver2_priv *bus_priv;
+ struct cam_isp_hw_dual_isp_update_args *stripe_args;
+ struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL;
+ struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
+ struct cam_isp_dual_stripe_config *stripe_config;
+ uint32_t outport_id, ports_plane_idx, i;
+
+ bus_priv = (struct cam_vfe_bus_ver2_priv *) priv;
+ stripe_args = (struct cam_isp_hw_dual_isp_update_args *)cmd_args;
+
+ vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
+ stripe_args->res->res_priv;
+
+ if (!vfe_out_data) {
+ CAM_ERR(CAM_ISP, "Failed! Invalid data");
+ return -EINVAL;
+ }
+
+ /* Low byte of the res_id is the output port index. */
+ outport_id = stripe_args->res->res_id & 0xFF;
+ if (stripe_args->res->res_id < CAM_ISP_IFE_OUT_RES_BASE ||
+ stripe_args->res->res_id >= CAM_ISP_IFE_OUT_RES_MAX)
+ return 0;
+
+ /* Index into the flattened [split][port][plane] stripe table. */
+ ports_plane_idx = (stripe_args->split_id *
+ (stripe_args->dual_cfg->num_ports * CAM_PACKET_MAX_PLANES)) +
+ (outport_id * CAM_PACKET_MAX_PLANES);
+ for (i = 0; i < vfe_out_data->num_wm; i++) {
+ wm_data = vfe_out_data->wm_res[i]->res_priv;
+ stripe_config = (struct cam_isp_dual_stripe_config *)
+ &stripe_args->dual_cfg->stripes[ports_plane_idx + i];
+ wm_data->width = stripe_config->width;
+ wm_data->offset = stripe_config->offset;
+ wm_data->tile_cfg = stripe_config->tileconfig;
+ CAM_DBG(CAM_ISP, "id:%x wm:%d width:0x%x offset:%x tilecfg:%x",
+ stripe_args->res->res_id, i, wm_data->width,
+ wm_data->offset, wm_data->tile_cfg);
+ }
+
+ return 0;
+}
+
+/* hw_ops.start adapter: starts the VFE out resource in hw_priv. */
+static int cam_vfe_bus_start_hw(void *hw_priv,
+ void *start_hw_args, uint32_t arg_size)
+{
+ return cam_vfe_bus_start_vfe_out(hw_priv);
+}
+
+/* hw_ops.stop adapter: stops the VFE out resource in hw_priv. */
+static int cam_vfe_bus_stop_hw(void *hw_priv,
+ void *stop_hw_args, uint32_t arg_size)
+{
+ return cam_vfe_bus_stop_vfe_out(hw_priv);
+}
+
+/*
+ * cam_vfe_bus_init_hw()
+ *
+ * @Brief: Subscribe the bus top-level IRQ and bus error IRQ, then
+ *         program the debug/addr-sync/clock-gating defaults.
+ *
+ * Fix: the error-IRQ subscription result was validated against
+ * bus_priv->irq_handle (the already-successful top IRQ handle), so a
+ * failed error-IRQ subscription went undetected; also unsubscribe the
+ * top IRQ on that failure path to avoid leaking the subscription.
+ *
+ * @Return: 0 on success, negative errno on failure.
+ */
+static int cam_vfe_bus_init_hw(void *hw_priv,
+ void *init_hw_args, uint32_t arg_size)
+{
+ struct cam_vfe_bus_ver2_priv *bus_priv = hw_priv;
+ uint32_t top_irq_reg_mask[2] = {0};
+
+ if (!bus_priv) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ return -EINVAL;
+ }
+
+ /* Bit 9 of the top IRQ register is the bus WR interrupt. */
+ top_irq_reg_mask[0] = (1 << 9);
+
+ bus_priv->irq_handle = cam_irq_controller_subscribe_irq(
+ bus_priv->common_data.vfe_irq_controller,
+ CAM_IRQ_PRIORITY_2,
+ top_irq_reg_mask,
+ bus_priv,
+ cam_vfe_bus_ver2_handle_irq,
+ NULL,
+ NULL,
+ NULL);
+
+ if (bus_priv->irq_handle <= 0) {
+ CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+ return -EFAULT;
+ }
+
+ bus_priv->error_irq_handle = cam_irq_controller_subscribe_irq(
+ bus_priv->common_data.bus_irq_controller,
+ CAM_IRQ_PRIORITY_0,
+ bus_error_irq_mask,
+ bus_priv,
+ cam_vfe_bus_error_irq_top_half,
+ cam_vfe_bus_err_bottom_half,
+ bus_priv->tasklet_info,
+ &tasklet_bh_api);
+
+ /* Check the error IRQ handle (previously re-checked irq_handle),
+ * and release the top IRQ subscription on failure.
+ */
+ if (bus_priv->error_irq_handle <= 0) {
+ CAM_ERR(CAM_ISP, "Failed to subscribe BUS error IRQ");
+ cam_irq_controller_unsubscribe_irq(
+ bus_priv->common_data.vfe_irq_controller,
+ bus_priv->irq_handle);
+ bus_priv->irq_handle = 0;
+ return -EFAULT;
+ }
+
+ /*Set Debug Registers*/
+ cam_io_w_mb(CAM_VFE_BUS_SET_DEBUG_REG, bus_priv->common_data.mem_base +
+ bus_priv->common_data.common_reg->debug_status_cfg);
+
+ /* BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
+ cam_io_w_mb(0x0, bus_priv->common_data.mem_base +
+ bus_priv->common_data.common_reg->addr_sync_frame_hdr);
+
+ /* no clock gating at bus input */
+ cam_io_w_mb(0xFFFFF, bus_priv->common_data.mem_base + 0x0000200C);
+
+ /* BUS_WR_TEST_BUS_CTRL */
+ cam_io_w_mb(0x0, bus_priv->common_data.mem_base + 0x0000211C);
+
+ /* if addr_no_sync has default value then config the addr no sync reg */
+ cam_io_w_mb(CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL,
+ bus_priv->common_data.mem_base +
+ bus_priv->common_data.common_reg->addr_sync_no_sync);
+
+ return 0;
+}
+
+/*
+ * cam_vfe_bus_deinit_hw()
+ *
+ * @Brief: Unsubscribe the bus error and top-level IRQs and rebuild the
+ *         free event payload list. Unsubscribe errors are logged but
+ *         do not abort the teardown.
+ *
+ * @Return: last unsubscribe rc (0 on success).
+ */
+static int cam_vfe_bus_deinit_hw(void *hw_priv,
+ void *deinit_hw_args, uint32_t arg_size)
+{
+ struct cam_vfe_bus_ver2_priv *bus_priv = hw_priv;
+ int rc = 0, i;
+
+ if (!bus_priv) {
+ CAM_ERR(CAM_ISP, "Error: Invalid args");
+ return -EINVAL;
+ }
+
+ if (bus_priv->error_irq_handle) {
+ rc = cam_irq_controller_unsubscribe_irq(
+ bus_priv->common_data.bus_irq_controller,
+ bus_priv->error_irq_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Failed to unsubscribe error irq rc=%d", rc);
+
+ bus_priv->error_irq_handle = 0;
+ }
+
+ if (bus_priv->irq_handle) {
+ rc = cam_irq_controller_unsubscribe_irq(
+ bus_priv->common_data.vfe_irq_controller,
+ bus_priv->irq_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Failed to unsubscribe irq rc=%d", rc);
+
+ bus_priv->irq_handle = 0;
+ }
+
+ /* Return every event payload to the free list. */
+ INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+ for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++) {
+ INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+ list_add_tail(&bus_priv->common_data.evt_payload[i].list,
+ &bus_priv->common_data.free_payload_list);
+ }
+
+ return rc;
+}
+
+/* hw_ops.process_cmd adapter: forwards to the typed handler below. */
+static int __cam_vfe_bus_process_cmd(void *priv,
+ uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+ return cam_vfe_bus_process_cmd(priv, cmd_type, cmd_args, arg_size);
+}
+
+/*
+ * cam_vfe_bus_process_cmd()
+ *
+ * @Brief: Dispatch ISP HW commands (buffer/HFR/UBWC updates, secure
+ *         mode query, stripe update, error-IRQ mask-off) to their
+ *         handlers.
+ *
+ * @Return: handler rc, or -EINVAL for unknown commands/bad args.
+ */
+static int cam_vfe_bus_process_cmd(
+ struct cam_isp_resource_node *priv,
+ uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+ int rc = -EINVAL;
+ struct cam_vfe_bus_ver2_priv *bus_priv;
+
+ if (!priv || !cmd_args) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments");
+ return -EINVAL;
+ }
+
+ switch (cmd_type) {
+ case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+ rc = cam_vfe_bus_update_wm(priv, cmd_args, arg_size);
+ break;
+ case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
+ rc = cam_vfe_bus_update_hfr(priv, cmd_args, arg_size);
+ break;
+ case CAM_ISP_HW_CMD_GET_SECURE_MODE:
+ rc = cam_vfe_bus_get_secure_mode(priv, cmd_args, arg_size);
+ break;
+ case CAM_ISP_HW_CMD_STRIPE_UPDATE:
+ rc = cam_vfe_bus_update_stripe_cfg(priv, cmd_args, arg_size);
+ break;
+ case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ:
+ /* NOTE(review): priv is a cam_isp_resource_node* in the
+ * signature but cast directly to bus_priv here — confirm
+ * the caller passes the bus private data for this cmd.
+ */
+ bus_priv = (struct cam_vfe_bus_ver2_priv *) priv;
+ if (bus_priv->error_irq_handle) {
+ CAM_DBG(CAM_ISP, "Mask off bus error irq handler");
+ rc = cam_irq_controller_unsubscribe_irq(
+ bus_priv->common_data.bus_irq_controller,
+ bus_priv->error_irq_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Failed to unsubscribe error irq rc=%d",
+ rc);
+
+ bus_priv->error_irq_handle = 0;
+ }
+ break;
+ case CAM_ISP_HW_CMD_UBWC_UPDATE:
+ rc = cam_vfe_bus_update_ubwc_config(cmd_args);
+ break;
+ default:
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid camif process command:%d",
+ cmd_type);
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * cam_vfe_bus_ver2_init()
+ *
+ * @Brief: Allocate and initialize the ver2 bus layer: private data,
+ *         bus IRQ controller, WM resources, composite groups, VFE out
+ *         resources and the event payload pool; then publish the
+ *         hw_ops vtable through *vfe_bus.
+ *
+ * @Return: 0 on success, negative errno on failure (all partially
+ *          initialized resources are unwound).
+ */
+int cam_vfe_bus_ver2_init(
+ struct cam_hw_soc_info *soc_info,
+ struct cam_hw_intf *hw_intf,
+ void *bus_hw_info,
+ void *vfe_irq_controller,
+ struct cam_vfe_bus **vfe_bus)
+{
+ int i, rc = 0;
+ struct cam_vfe_bus_ver2_priv *bus_priv = NULL;
+ struct cam_vfe_bus *vfe_bus_local;
+ struct cam_vfe_bus_ver2_hw_info *ver2_hw_info = bus_hw_info;
+
+ CAM_DBG(CAM_ISP, "Enter");
+
+ if (!soc_info || !hw_intf || !bus_hw_info || !vfe_irq_controller) {
+ CAM_ERR(CAM_ISP,
+ "Inval_prms soc_info:%pK hw_intf:%pK hw_info%pK",
+ soc_info, hw_intf, bus_hw_info);
+ CAM_ERR(CAM_ISP, "controller: %pK", vfe_irq_controller);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ vfe_bus_local = kzalloc(sizeof(struct cam_vfe_bus), GFP_KERNEL);
+ if (!vfe_bus_local) {
+ CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ bus_priv = kzalloc(sizeof(struct cam_vfe_bus_ver2_priv),
+ GFP_KERNEL);
+ if (!bus_priv) {
+ CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus_priv");
+ rc = -ENOMEM;
+ goto free_bus_local;
+ }
+ vfe_bus_local->bus_priv = bus_priv;
+
+ bus_priv->num_client = ver2_hw_info->num_client;
+ bus_priv->num_out = ver2_hw_info->num_out;
+ bus_priv->common_data.num_sec_out = 0;
+ bus_priv->common_data.secure_mode = CAM_SECURE_MODE_NON_SECURE;
+ bus_priv->common_data.core_index = soc_info->index;
+ bus_priv->common_data.mem_base =
+ CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX);
+ bus_priv->common_data.hw_intf = hw_intf;
+ bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
+ bus_priv->common_data.common_reg = &ver2_hw_info->common_reg;
+ bus_priv->common_data.addr_no_sync =
+ CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL;
+
+ mutex_init(&bus_priv->common_data.bus_mutex);
+
+ rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
+ &ver2_hw_info->common_reg.irq_reg_info,
+ &bus_priv->common_data.bus_irq_controller);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "cam_irq_controller_init failed");
+ goto free_bus_priv;
+ }
+
+ INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+ INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp);
+ INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+ for (i = 0; i < bus_priv->num_client; i++) {
+ rc = cam_vfe_bus_init_wm_resource(i, bus_priv, bus_hw_info,
+ &bus_priv->bus_client[i]);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP, "Init WM failed rc=%d", rc);
+ goto deinit_wm;
+ }
+ }
+
+ for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
+ rc = cam_vfe_bus_init_comp_grp(i, bus_priv, bus_hw_info,
+ &bus_priv->comp_grp[i]);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP, "Init Comp Grp failed rc=%d", rc);
+ goto deinit_comp_grp;
+ }
+ }
+
+ for (i = 0; i < bus_priv->num_out; i++) {
+ rc = cam_vfe_bus_init_vfe_out_resource(i, bus_priv,
+ bus_hw_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP, "Init VFE Out failed rc=%d", rc);
+ goto deinit_vfe_out;
+ }
+ }
+
+ spin_lock_init(&bus_priv->common_data.spin_lock);
+ INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+ for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++) {
+ INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+ list_add_tail(&bus_priv->common_data.evt_payload[i].list,
+ &bus_priv->common_data.free_payload_list);
+ }
+
+ vfe_bus_local->hw_ops.reserve = cam_vfe_bus_acquire_vfe_out;
+ vfe_bus_local->hw_ops.release = cam_vfe_bus_release_vfe_out;
+ vfe_bus_local->hw_ops.start = cam_vfe_bus_start_hw;
+ vfe_bus_local->hw_ops.stop = cam_vfe_bus_stop_hw;
+ vfe_bus_local->hw_ops.init = cam_vfe_bus_init_hw;
+ vfe_bus_local->hw_ops.deinit = cam_vfe_bus_deinit_hw;
+ vfe_bus_local->top_half_handler = cam_vfe_bus_ver2_handle_irq;
+ vfe_bus_local->bottom_half_handler = NULL;
+ vfe_bus_local->hw_ops.process_cmd = __cam_vfe_bus_process_cmd;
+
+ *vfe_bus = vfe_bus_local;
+
+ CAM_DBG(CAM_ISP, "Exit");
+ return rc;
+
+/* Error unwind: labels deliberately fall through to the next stage.
+ * When a loop above fails, i is the failed index (items [0, i) are
+ * deinit'ed here); after each cleanup loop i is -1, so the "i < 0"
+ * check at the next label resets it to that stage's full count.
+ */
+deinit_vfe_out:
+ if (i < 0)
+ i = CAM_VFE_BUS_VER2_VFE_OUT_MAX;
+ for (--i; i >= 0; i--)
+ cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
+
+deinit_comp_grp:
+ if (i < 0)
+ i = CAM_VFE_BUS_VER2_COMP_GRP_MAX;
+ for (--i; i >= 0; i--)
+ cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
+
+deinit_wm:
+ if (i < 0)
+ i = bus_priv->num_client;
+ for (--i; i >= 0; i--)
+ cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
+
+free_bus_priv:
+ kfree(vfe_bus_local->bus_priv);
+
+free_bus_local:
+ kfree(vfe_bus_local);
+
+end:
+ return rc;
+}
+
+/*
+ * cam_vfe_bus_ver2_deinit()
+ *
+ * @Brief: Tear down everything cam_vfe_bus_ver2_init() created and
+ *         NULL out the caller's pointer. Per-resource deinit failures
+ *         are logged but teardown continues.
+ *
+ * @Return: last failing rc (0 on full success).
+ */
+int cam_vfe_bus_ver2_deinit(
+ struct cam_vfe_bus **vfe_bus)
+{
+ int i, rc = 0;
+ struct cam_vfe_bus_ver2_priv *bus_priv = NULL;
+ struct cam_vfe_bus *vfe_bus_local;
+
+ if (!vfe_bus || !*vfe_bus) {
+ CAM_ERR(CAM_ISP, "Invalid input");
+ return -EINVAL;
+ }
+ vfe_bus_local = *vfe_bus;
+
+ bus_priv = vfe_bus_local->bus_priv;
+ if (!bus_priv) {
+ CAM_ERR(CAM_ISP, "bus_priv is NULL");
+ rc = -ENODEV;
+ goto free_bus_local;
+ }
+
+ INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+ for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++)
+ INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+
+ for (i = 0; i < bus_priv->num_client; i++) {
+ rc = cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Deinit WM failed rc=%d", rc);
+ }
+
+ for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
+ rc = cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Deinit Comp Grp failed rc=%d", rc);
+ }
+
+ for (i = 0; i < CAM_VFE_BUS_VER2_VFE_OUT_MAX; i++) {
+ rc = cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Deinit VFE Out failed rc=%d", rc);
+ }
+
+ INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+ INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp);
+ INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+ rc = cam_irq_controller_deinit(
+ &bus_priv->common_data.bus_irq_controller);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Deinit IRQ Controller failed rc=%d", rc);
+
+ mutex_destroy(&bus_priv->common_data.bus_mutex);
+ kfree(vfe_bus_local->bus_priv);
+
+free_bus_local:
+ kfree(vfe_bus_local);
+
+ *vfe_bus = NULL;
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
new file mode 100644
index 000000000000..73b7eb2dc59d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
@@ -0,0 +1,234 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_BUS_VER2_H_
+#define _CAM_VFE_BUS_VER2_H_
+
+#include "cam_irq_controller.h"
+#include "cam_vfe_bus.h"
+
+#define CAM_VFE_BUS_VER2_MAX_CLIENTS 24
+
+/* VFE core instances addressable by the ver2 bus driver. */
+enum cam_vfe_bus_ver2_vfe_core_id {
+ CAM_VFE_BUS_VER2_VFE_CORE_0,
+ CAM_VFE_BUS_VER2_VFE_CORE_1,
+ CAM_VFE_BUS_VER2_VFE_CORE_MAX,
+};
+
+/*
+ * Composite group identifiers: groups 0-5 for single-VFE operation,
+ * DUAL_0-DUAL_5 for dual-VFE operation.
+ */
+enum cam_vfe_bus_ver2_comp_grp_type {
+ CAM_VFE_BUS_VER2_COMP_GRP_0,
+ CAM_VFE_BUS_VER2_COMP_GRP_1,
+ CAM_VFE_BUS_VER2_COMP_GRP_2,
+ CAM_VFE_BUS_VER2_COMP_GRP_3,
+ CAM_VFE_BUS_VER2_COMP_GRP_4,
+ CAM_VFE_BUS_VER2_COMP_GRP_5,
+ CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0,
+ CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1,
+ CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2,
+ CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3,
+ CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4,
+ CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5,
+ CAM_VFE_BUS_VER2_COMP_GRP_MAX,
+};
+
+/*
+ * VFE output port types: RDI paths, image paths (FULL/DS/FD/RAW/PDAF)
+ * and the stats outputs.
+ */
+enum cam_vfe_bus_ver2_vfe_out_type {
+ CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+ CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+ CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+ CAM_VFE_BUS_VER2_VFE_OUT_RDI3,
+ CAM_VFE_BUS_VER2_VFE_OUT_FULL,
+ CAM_VFE_BUS_VER2_VFE_OUT_DS4,
+ CAM_VFE_BUS_VER2_VFE_OUT_DS16,
+ CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP,
+ CAM_VFE_BUS_VER2_VFE_OUT_FD,
+ CAM_VFE_BUS_VER2_VFE_OUT_PDAF,
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG,
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+ CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP,
+ CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP,
+ CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP,
+ CAM_VFE_BUS_VER2_VFE_OUT_2PD,
+ CAM_VFE_BUS_VER2_VFE_OUT_MAX,
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_common:
+ *
+ * @Brief: Common registers across all BUS Clients. Fields are register
+ *         offsets (presumably relative to the VFE mem_base — confirm);
+ *         irq_reg_info describes the bus-level IRQ register set used
+ *         by the bus IRQ controller.
+ */
+struct cam_vfe_bus_ver2_reg_offset_common {
+ uint32_t hw_version;
+ uint32_t hw_capability;
+ uint32_t sw_reset;
+ uint32_t cgc_ovd;
+ uint32_t pwr_iso_cfg;
+ uint32_t dual_master_comp_cfg;
+ struct cam_irq_controller_reg_info irq_reg_info;
+ uint32_t comp_error_status;
+ uint32_t comp_ovrwr_status;
+ uint32_t dual_comp_error_status;
+ uint32_t dual_comp_ovrwr_status;
+ uint32_t addr_sync_cfg;
+ uint32_t addr_sync_frame_hdr;
+ uint32_t addr_sync_no_sync;
+ uint32_t debug_status_cfg;
+ uint32_t debug_status_0;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_ubwc_client:
+ *
+ * @Brief: UBWC register offsets for BUS Clients (legacy layout, no
+ *         mode_cfg_1 — compare cam_vfe_bus_ver2_reg_offset_ubwc_3_client).
+ */
+struct cam_vfe_bus_ver2_reg_offset_ubwc_client {
+ uint32_t tile_cfg;
+ uint32_t h_init;
+ uint32_t v_init;
+ uint32_t meta_addr;
+ uint32_t meta_offset;
+ uint32_t meta_stride;
+ uint32_t mode_cfg_0;
+ uint32_t bw_limit;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client:
+ *
+ * @Brief: UBWC v3 register offsets for BUS Clients; extends the legacy
+ *         layout with mode_cfg_1. (Comment previously copy-pasted the
+ *         legacy struct's name.)
+ */
+struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client {
+ uint32_t tile_cfg;
+ uint32_t h_init;
+ uint32_t v_init;
+ uint32_t meta_addr;
+ uint32_t meta_offset;
+ uint32_t meta_stride;
+ uint32_t mode_cfg_0;
+ uint32_t mode_cfg_1;
+ uint32_t bw_limit;
+};
+
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_bus_client:
+ *
+ * @Brief: Register offsets for BUS Clients.
+ *
+ * @ubwc_regs: points at a version-specific UBWC offset struct
+ *             (ubwc_client or ubwc_3_client); NULL when the client
+ *             has no UBWC support.
+ */
+struct cam_vfe_bus_ver2_reg_offset_bus_client {
+ uint32_t status0;
+ uint32_t status1;
+ uint32_t cfg;
+ uint32_t header_addr;
+ uint32_t header_cfg;
+ uint32_t image_addr;
+ uint32_t image_addr_offset;
+ uint32_t buffer_width_cfg;
+ uint32_t buffer_height_cfg;
+ uint32_t packer_cfg;
+ uint32_t stride;
+ uint32_t irq_subsample_period;
+ uint32_t irq_subsample_pattern;
+ uint32_t framedrop_period;
+ uint32_t framedrop_pattern;
+ uint32_t frame_inc;
+ uint32_t burst_limit;
+ void *ubwc_regs;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_comp_grp:
+ *
+ * @Brief: Register offsets for Composite Group registers
+ * @comp_mask: Comp group register address
+ * @addr_sync_mask: Address sync group register address
+ */
+struct cam_vfe_bus_ver2_reg_offset_comp_grp {
+ uint32_t comp_mask;
+ uint32_t addr_sync_mask;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_vfe_out_hw_info:
+ *
+ * @Brief: HW capability of VFE Bus Client
+ * @vfe_out_type: Output port this entry describes
+ * @max_width:    Maximum supported output width
+ * @max_height:   Maximum supported output height
+ */
+struct cam_vfe_bus_ver2_vfe_out_hw_info {
+ enum cam_vfe_bus_ver2_vfe_out_type vfe_out_type;
+ uint32_t max_width;
+ uint32_t max_height;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_hw_info:
+ *
+ * @Brief: HW register info for entire Bus
+ *
+ * @common_reg: Common register details
+ * @num_client: Number of write-master bus clients
+ * @bus_client_reg: Bus client register info
+ * @comp_reg_grp: Composite group register info
+ * @num_out: Number of VFE output ports
+ * @vfe_out_hw_info: VFE output capability
+ */
+struct cam_vfe_bus_ver2_hw_info {
+ struct cam_vfe_bus_ver2_reg_offset_common common_reg;
+ uint32_t num_client;
+ struct cam_vfe_bus_ver2_reg_offset_bus_client
+ bus_client_reg[CAM_VFE_BUS_VER2_MAX_CLIENTS];
+ struct cam_vfe_bus_ver2_reg_offset_comp_grp
+ comp_grp_reg[CAM_VFE_BUS_VER2_COMP_GRP_MAX];
+ uint32_t num_out;
+ struct cam_vfe_bus_ver2_vfe_out_hw_info
+ vfe_out_hw_info[CAM_VFE_BUS_VER2_VFE_OUT_MAX];
+};
+
+/*
+ * cam_vfe_bus_ver2_init()
+ *
+ * @Brief: Initialize Bus layer
+ *
+ * @soc_info: Soc Information for the associated HW
+ * @hw_intf: HW Interface of HW to which this resource belongs
+ * @bus_hw_info: BUS HW info that contains details of BUS registers
+ * @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top
+ * level IRQs
+ * @vfe_bus: Pointer to vfe_bus structure which will be filled
+ * and returned on successful initialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_bus_ver2_init(
+ struct cam_hw_soc_info *soc_info,
+ struct cam_hw_intf *hw_intf,
+ void *bus_hw_info,
+ void *vfe_irq_controller,
+ struct cam_vfe_bus **vfe_bus);
+
+/*
+ * cam_vfe_bus_ver2_deinit()
+ *
+ * @Brief: Deinitialize Bus layer
+ *
+ * @vfe_bus: Pointer to vfe_bus structure to deinitialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_bus_ver2_deinit(struct cam_vfe_bus **vfe_bus);
+
+#endif /* _CAM_VFE_BUS_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
new file mode 100644
index 000000000000..6f88bc7f5a99
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_BUS_H_
+#define _CAM_VFE_BUS_H_
+
+#include <uapi/media/cam_isp.h>
+#include "cam_hw_intf.h"
+#include "cam_isp_hw.h"
+
+#define CAM_VFE_BUS_VER_1_0 0x1000
+#define CAM_VFE_BUS_VER_2_0 0x2000
+
+enum cam_vfe_bus_plane_type {
+ PLANE_Y,
+ PLANE_C,
+ PLANE_MAX,
+};
+
+/*
+ * struct cam_vfe_bus:
+ *
+ * @Brief: Bus interface structure
+ *
+ * @bus_priv: Private data of BUS
+ * @hw_ops: Hardware interface functions
+ * @top_half_handler: Top Half handler function
+ * @bottom_half_handler: Bottom Half handler function
+ */
+struct cam_vfe_bus {
+ void *bus_priv;
+
+ struct cam_hw_ops hw_ops;
+ CAM_IRQ_HANDLER_TOP_HALF top_half_handler;
+ CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler;
+};
+
+/*
+ * cam_vfe_bus_init()
+ *
+ * @Brief: Initialize Bus layer
+ *
+ * @bus_version: Version of BUS to initialize
+ * @soc_info: Soc Information for the associated HW
+ * @hw_intf: HW Interface of HW to which this resource belongs
+ * @bus_hw_info: BUS HW info that contains details of BUS registers
+ * @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top
+ * level IRQs
+ * @vfe_bus: Pointer to vfe_bus structure which will be filled
+ * and returned on successful initialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_bus_init(uint32_t bus_version,
+ struct cam_hw_soc_info *soc_info,
+ struct cam_hw_intf *hw_intf,
+ void *bus_hw_info,
+ void *vfe_irq_controller,
+ struct cam_vfe_bus **vfe_bus);
+
+/*
+ * cam_vfe_bus_deinit()
+ *
+ * @Brief: Deinitialize Bus layer
+ *
+ * @bus_version: Version of BUS to deinitialize
+ * @vfe_bus: Pointer to vfe_bus structure to deinitialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_bus_deinit(uint32_t bus_version,
+ struct cam_vfe_bus **vfe_bus);
+
+#endif /* _CAM_VFE_BUS_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
new file mode 100644
index 000000000000..cdb6b2867433
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_camif_lite_ver2.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_top.o cam_vfe_top_ver2.o cam_vfe_camif_ver2.o cam_vfe_rdi.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
new file mode 100644
index 000000000000..64b77eb7ccb8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_io_util.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_soc.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_camif_lite_ver2.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+
+struct cam_vfe_mux_camif_lite_data {
+ void __iomem *mem_base;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_camif_lite_ver2_reg *camif_lite_reg;
+ struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_camif_lite_ver2_reg_data *reg_data;
+ struct cam_hw_soc_info *soc_info;
+ enum cam_isp_hw_sync_mode sync_mode;
+};
+
+static int cam_vfe_camif_lite_get_reg_update(
+ struct cam_isp_resource_node *camif_lite_res,
+ void *cmd_args,
+ uint32_t arg_size)
+{
+ uint32_t size = 0;
+ uint32_t reg_val_pair[2];
+ struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
+ struct cam_cdm_utils_ops *cdm_util_ops = NULL;
+ struct cam_vfe_mux_camif_lite_data *rsrc_data = NULL;
+
+ if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+ CAM_ERR(CAM_ISP, "Invalid cmd size");
+ return -EINVAL;
+ }
+
+ if (!cdm_args || !cdm_args->res) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ return -EINVAL;
+ }
+
+ cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+ if (!cdm_util_ops) {
+ CAM_ERR(CAM_ISP, "Invalid CDM ops");
+ return -EINVAL;
+ }
+
+ size = cdm_util_ops->cdm_required_size_reg_random(1);
+ /* since cdm returns dwords, we need to convert it into bytes */
+ if ((size * 4) > cdm_args->cmd.size) {
+ CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
+ cdm_args->cmd.size, size);
+ return -EINVAL;
+ }
+
+ rsrc_data = camif_lite_res->res_priv;
+ reg_val_pair[0] = rsrc_data->camif_lite_reg->reg_update_cmd;
+ reg_val_pair[1] = rsrc_data->reg_data->dual_pd_reg_update_cmd_data;
+ CAM_DBG(CAM_ISP, "CAMIF Lite reg_update_cmd %x offset %x",
+ reg_val_pair[1], reg_val_pair[0]);
+
+ cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
+ 1, reg_val_pair);
+
+ cdm_args->cmd.used_bytes = size * 4;
+
+ return 0;
+}
+
+int cam_vfe_camif_lite_ver2_acquire_resource(
+ struct cam_isp_resource_node *camif_lite_res,
+ void *acquire_param)
+{
+ struct cam_vfe_mux_camif_lite_data *camif_lite_data;
+ struct cam_vfe_acquire_args *acquire_data;
+ int rc = 0;
+
+ if (!camif_lite_res) {
+ CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+ return -EINVAL;
+ }
+
+ camif_lite_data = (struct cam_vfe_mux_camif_lite_data *)
+ camif_lite_res->res_priv;
+ acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
+
+ camif_lite_data->sync_mode = acquire_data->vfe_in.sync_mode;
+
+ CAM_DBG(CAM_ISP, "hw id:%d sync_mode=%d",
+ camif_lite_res->hw_intf->hw_idx,
+ camif_lite_data->sync_mode);
+ return rc;
+}
+
+static int cam_vfe_camif_lite_resource_start(
+ struct cam_isp_resource_node *camif_lite_res)
+{
+ struct cam_vfe_mux_camif_lite_data *rsrc_data;
+ uint32_t val = 0;
+
+ if (!camif_lite_res) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+ return -EINVAL;
+ }
+
+ if (camif_lite_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_ERR(CAM_ISP, "Error! Invalid camif lite res res_state:%d",
+ camif_lite_res->res_state);
+ return -EINVAL;
+ }
+
+ rsrc_data = (struct cam_vfe_mux_camif_lite_data *)
+ camif_lite_res->res_priv;
+
+ /* vfe core config */
+ val = cam_io_r_mb(rsrc_data->mem_base +
+ rsrc_data->common_reg->core_cfg);
+
+ if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+ val |= (1 << rsrc_data->reg_data->extern_reg_update_shift);
+
+ val |= (1 << rsrc_data->reg_data->dual_pd_path_sel_shift);
+
+ cam_io_w_mb(val, rsrc_data->mem_base +
+ rsrc_data->common_reg->core_cfg);
+
+ CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d",
+ camif_lite_res->hw_intf->hw_idx, val);
+
+ /* epoch config with 20 line */
+ cam_io_w_mb(rsrc_data->reg_data->lite_epoch_line_cfg,
+ rsrc_data->mem_base +
+ rsrc_data->camif_lite_reg->lite_epoch_irq);
+
+ camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+ /* Reg Update */
+ cam_io_w_mb(rsrc_data->reg_data->dual_pd_reg_update_cmd_data,
+ rsrc_data->mem_base +
+ rsrc_data->camif_lite_reg->reg_update_cmd);
+ CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d",
+ camif_lite_res->hw_intf->hw_idx,
+ rsrc_data->reg_data->dual_pd_reg_update_cmd_data);
+
+ CAM_DBG(CAM_ISP, "Start Camif Lite IFE %d Done",
+ camif_lite_res->hw_intf->hw_idx);
+ return 0;
+}
+
+static int cam_vfe_camif_lite_resource_stop(
+ struct cam_isp_resource_node *camif_lite_res)
+{
+ struct cam_vfe_mux_camif_lite_data *camif_lite_priv;
+ struct cam_vfe_camif_lite_ver2_reg *camif_lite_reg;
+ int rc = 0;
+ uint32_t val = 0;
+
+ if (!camif_lite_res) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+ return -EINVAL;
+ }
+
+ if ((camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
+ (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE))
+ return 0;
+
+ camif_lite_priv = (struct cam_vfe_mux_camif_lite_data *)
+ camif_lite_res->res_priv;
+ camif_lite_reg = camif_lite_priv->camif_lite_reg;
+
+ val = cam_io_r_mb(camif_lite_priv->mem_base +
+ camif_lite_priv->common_reg->core_cfg);
+ val &= (~(1 << camif_lite_priv->reg_data->dual_pd_path_sel_shift));
+ cam_io_w_mb(val, camif_lite_priv->mem_base +
+ camif_lite_priv->common_reg->core_cfg);
+
+ if (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+ camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+ return rc;
+}
+
+static int cam_vfe_camif_lite_process_cmd(
+ struct cam_isp_resource_node *rsrc_node,
+ uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+ int rc = -EINVAL;
+
+ if (!rsrc_node || !cmd_args) {
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
+ return -EINVAL;
+ }
+
+ switch (cmd_type) {
+ case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+ rc = cam_vfe_camif_lite_get_reg_update(rsrc_node, cmd_args,
+ arg_size);
+ break;
+ default:
+ CAM_ERR(CAM_ISP,
+ "unsupported process command:%d", cmd_type);
+ break;
+ }
+
+ return rc;
+}
+
+static int cam_vfe_camif_lite_handle_irq_top_half(uint32_t evt_id,
+ struct cam_irq_th_payload *th_payload)
+{
+ return -EPERM;
+}
+
+static int cam_vfe_camif_lite_handle_irq_bottom_half(
+ void *handler_priv,
+ void *evt_payload_priv)
+{
+ int ret = CAM_VFE_IRQ_STATUS_ERR;
+ struct cam_isp_resource_node *camif_lite_node;
+ struct cam_vfe_mux_camif_lite_data *camif_lite_priv;
+ struct cam_vfe_top_irq_evt_payload *payload;
+ uint32_t irq_status0;
+ uint32_t irq_status1;
+
+ if (!handler_priv || !evt_payload_priv) {
+ CAM_ERR(CAM_ISP, "Invalid params");
+ return ret;
+ }
+
+ camif_lite_node = handler_priv;
+ camif_lite_priv = camif_lite_node->res_priv;
+ payload = evt_payload_priv;
+ irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+ irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
+
+ CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+ CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
+
+ switch (payload->evt_id) {
+ case CAM_ISP_HW_EVENT_SOF:
+ if (irq_status0 &
+ camif_lite_priv->reg_data->lite_sof_irq_mask) {
+ CAM_DBG(CAM_ISP, "Received SOF");
+ ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+ }
+ break;
+ case CAM_ISP_HW_EVENT_EPOCH:
+ if (irq_status0 &
+ camif_lite_priv->reg_data->lite_epoch0_irq_mask) {
+ CAM_DBG(CAM_ISP, "Received EPOCH");
+ ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+ }
+ break;
+ case CAM_ISP_HW_EVENT_REG_UPDATE:
+ if (irq_status0 &
+ camif_lite_priv->reg_data->dual_pd_reg_upd_irq_mask) {
+ CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+ ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+ }
+ break;
+ case CAM_ISP_HW_EVENT_EOF:
+ if (irq_status0 &
+ camif_lite_priv->reg_data->lite_eof_irq_mask) {
+ CAM_DBG(CAM_ISP, "Received EOF\n");
+ ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+ }
+ break;
+ case CAM_ISP_HW_EVENT_ERROR:
+ if (irq_status1 &
+ camif_lite_priv->reg_data->lite_error_irq_mask1) {
+ CAM_DBG(CAM_ISP, "Received ERROR\n");
+ ret = CAM_ISP_HW_ERROR_OVERFLOW;
+ } else {
+ ret = CAM_ISP_HW_ERROR_NONE;
+ }
+ break;
+ default:
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "returning status = %d", ret);
+ return ret;
+}
+
+int cam_vfe_camif_lite_ver2_init(
+ struct cam_hw_intf *hw_intf,
+ struct cam_hw_soc_info *soc_info,
+ void *camif_lite_hw_info,
+ struct cam_isp_resource_node *camif_lite_node)
+{
+ struct cam_vfe_mux_camif_lite_data *camif_lite_priv = NULL;
+ struct cam_vfe_camif_lite_ver2_hw_info *camif_lite_info =
+ camif_lite_hw_info;
+
+ camif_lite_priv = kzalloc(sizeof(*camif_lite_priv),
+ GFP_KERNEL);
+ if (!camif_lite_priv)
+ return -ENOMEM;
+
+ camif_lite_node->res_priv = camif_lite_priv;
+
+ camif_lite_priv->mem_base =
+ soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+ camif_lite_priv->camif_lite_reg = camif_lite_info->camif_lite_reg;
+ camif_lite_priv->common_reg = camif_lite_info->common_reg;
+ camif_lite_priv->reg_data = camif_lite_info->reg_data;
+ camif_lite_priv->hw_intf = hw_intf;
+ camif_lite_priv->soc_info = soc_info;
+
+ camif_lite_node->init = NULL;
+ camif_lite_node->deinit = NULL;
+ camif_lite_node->start = cam_vfe_camif_lite_resource_start;
+ camif_lite_node->stop = cam_vfe_camif_lite_resource_stop;
+ camif_lite_node->process_cmd = cam_vfe_camif_lite_process_cmd;
+ camif_lite_node->top_half_handler =
+ cam_vfe_camif_lite_handle_irq_top_half;
+ camif_lite_node->bottom_half_handler =
+ cam_vfe_camif_lite_handle_irq_bottom_half;
+
+ return 0;
+}
+
+int cam_vfe_camif_lite_ver2_deinit(
+ struct cam_isp_resource_node *camif_lite_node)
+{
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv =
+		camif_lite_node->res_priv;
+
+ camif_lite_node->start = NULL;
+ camif_lite_node->stop = NULL;
+ camif_lite_node->process_cmd = NULL;
+ camif_lite_node->top_half_handler = NULL;
+ camif_lite_node->bottom_half_handler = NULL;
+
+ camif_lite_node->res_priv = NULL;
+
+ if (!camif_lite_priv) {
+		CAM_ERR(CAM_ISP, "Error! camif_lite_priv is NULL");
+ return -ENODEV;
+ }
+
+ kfree(camif_lite_priv);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h
new file mode 100644
index 000000000000..118e870f76e7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_CAMIF_LITE_VER2_H_
+#define _CAM_VFE_CAMIF_LITE_VER2_H_
+
+#include "cam_isp_hw.h"
+#include "cam_vfe_top.h"
+
+struct cam_vfe_camif_lite_ver2_reg {
+ uint32_t camif_lite_cmd;
+ uint32_t camif_lite_config;
+ uint32_t lite_skip_period;
+ uint32_t lite_irq_subsample_pattern;
+ uint32_t lite_epoch_irq;
+ uint32_t reg_update_cmd;
+};
+
+struct cam_vfe_camif_lite_ver2_reg_data {
+ uint32_t dual_pd_reg_update_cmd_data;
+ uint32_t lite_epoch_line_cfg;
+ uint32_t lite_sof_irq_mask;
+ uint32_t lite_epoch0_irq_mask;
+ uint32_t dual_pd_reg_upd_irq_mask;
+ uint32_t lite_eof_irq_mask;
+ uint32_t lite_error_irq_mask0;
+ uint32_t lite_error_irq_mask1;
+ uint32_t extern_reg_update_shift;
+ uint32_t dual_pd_path_sel_shift;
+};
+
+struct cam_vfe_camif_lite_ver2_hw_info {
+ struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_camif_lite_ver2_reg *camif_lite_reg;
+ struct cam_vfe_camif_lite_ver2_reg_data *reg_data;
+};
+
+int cam_vfe_camif_lite_ver2_acquire_resource(
+ struct cam_isp_resource_node *camif_lite_res,
+ void *acquire_param);
+
+int cam_vfe_camif_lite_ver2_init(
+ struct cam_hw_intf *hw_intf,
+ struct cam_hw_soc_info *soc_info,
+ void *camif_lite_hw_info,
+ struct cam_isp_resource_node *camif_lite_node);
+
+int cam_vfe_camif_lite_ver2_deinit(
+	struct cam_isp_resource_node *camif_lite_node);
+
+#endif /* _CAM_VFE_CAMIF_LITE_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
new file mode 100644
index 000000000000..95bed8d9d23c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -0,0 +1,658 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_io_util.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_soc.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_camif_ver2.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+#include "cam_cpas_api.h"
+
+#define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2
+
+struct cam_vfe_mux_camif_data {
+ void __iomem *mem_base;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_camif_ver2_reg *camif_reg;
+ struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_camif_reg_data *reg_data;
+ struct cam_hw_soc_info *soc_info;
+
+ enum cam_isp_hw_sync_mode sync_mode;
+ uint32_t dsp_mode;
+ uint32_t pix_pattern;
+ uint32_t first_pixel;
+ uint32_t first_line;
+ uint32_t last_pixel;
+ uint32_t last_line;
+ bool enable_sof_irq_debug;
+ uint32_t irq_debug_cnt;
+ uint32_t camif_debug;
+};
+
+static int cam_vfe_camif_validate_pix_pattern(uint32_t pattern)
+{
+ int rc;
+
+ switch (pattern) {
+ case CAM_ISP_PATTERN_BAYER_RGRGRG:
+ case CAM_ISP_PATTERN_BAYER_GRGRGR:
+ case CAM_ISP_PATTERN_BAYER_BGBGBG:
+ case CAM_ISP_PATTERN_BAYER_GBGBGB:
+ case CAM_ISP_PATTERN_YUV_YCBYCR:
+ case CAM_ISP_PATTERN_YUV_YCRYCB:
+ case CAM_ISP_PATTERN_YUV_CBYCRY:
+ case CAM_ISP_PATTERN_YUV_CRYCBY:
+ rc = 0;
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Error! Invalid pix pattern:%d", pattern);
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+static int cam_vfe_camif_get_reg_update(
+ struct cam_isp_resource_node *camif_res,
+ void *cmd_args, uint32_t arg_size)
+{
+ uint32_t size = 0;
+ uint32_t reg_val_pair[2];
+ struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
+ struct cam_cdm_utils_ops *cdm_util_ops = NULL;
+ struct cam_vfe_mux_camif_data *rsrc_data = NULL;
+
+ if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+ CAM_ERR(CAM_ISP, "Invalid cmd size");
+ return -EINVAL;
+ }
+
+ if (!cdm_args || !cdm_args->res) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ return -EINVAL;
+ }
+
+ cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+ if (!cdm_util_ops) {
+ CAM_ERR(CAM_ISP, "Invalid CDM ops");
+ return -EINVAL;
+ }
+
+ size = cdm_util_ops->cdm_required_size_reg_random(1);
+ /* since cdm returns dwords, we need to convert it into bytes */
+ if ((size * 4) > cdm_args->cmd.size) {
+ CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
+ cdm_args->cmd.size, size);
+ return -EINVAL;
+ }
+
+ rsrc_data = camif_res->res_priv;
+ reg_val_pair[0] = rsrc_data->camif_reg->reg_update_cmd;
+ reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data;
+ CAM_DBG(CAM_ISP, "CAMIF reg_update_cmd %x offset %x",
+ reg_val_pair[1], reg_val_pair[0]);
+
+ cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
+ 1, reg_val_pair);
+
+ cdm_args->cmd.used_bytes = size * 4;
+
+ return 0;
+}
+
+int cam_vfe_camif_ver2_acquire_resource(
+ struct cam_isp_resource_node *camif_res,
+ void *acquire_param)
+{
+ struct cam_vfe_mux_camif_data *camif_data;
+ struct cam_vfe_acquire_args *acquire_data;
+
+ int rc = 0;
+
+ camif_data = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+ acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
+
+ rc = cam_vfe_camif_validate_pix_pattern(
+ acquire_data->vfe_in.in_port->test_pattern);
+ if (rc)
+ return rc;
+
+ camif_data->sync_mode = acquire_data->vfe_in.sync_mode;
+ camif_data->pix_pattern = acquire_data->vfe_in.in_port->test_pattern;
+ camif_data->dsp_mode = acquire_data->vfe_in.in_port->dsp_mode;
+ camif_data->first_pixel = acquire_data->vfe_in.in_port->left_start;
+ camif_data->last_pixel = acquire_data->vfe_in.in_port->left_stop;
+ camif_data->first_line = acquire_data->vfe_in.in_port->line_start;
+ camif_data->last_line = acquire_data->vfe_in.in_port->line_stop;
+
+ CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d",
+ camif_res->hw_intf->hw_idx,
+ camif_data->pix_pattern, camif_data->dsp_mode);
+ return rc;
+}
+
+static int cam_vfe_camif_resource_init(
+ struct cam_isp_resource_node *camif_res,
+ void *init_args, uint32_t arg_size)
+{
+ struct cam_vfe_mux_camif_data *camif_data;
+ struct cam_hw_soc_info *soc_info;
+ int rc = 0;
+
+ if (!camif_res) {
+ CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+ return -EINVAL;
+ }
+
+ camif_data = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+
+ soc_info = camif_data->soc_info;
+
+ if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+ (camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+ rc = cam_vfe_soc_enable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
+ if (rc)
+ CAM_ERR(CAM_ISP, "failed to enable dsp clk");
+ }
+
+ return rc;
+}
+
+static int cam_vfe_camif_resource_deinit(
+ struct cam_isp_resource_node *camif_res,
+ void *init_args, uint32_t arg_size)
+{
+ struct cam_vfe_mux_camif_data *camif_data;
+ struct cam_hw_soc_info *soc_info;
+ int rc = 0;
+
+ if (!camif_res) {
+ CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+ return -EINVAL;
+ }
+
+ camif_data = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+
+ soc_info = camif_data->soc_info;
+
+ if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+ (camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+ rc = cam_vfe_soc_disable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
+ if (rc)
+ CAM_ERR(CAM_ISP, "failed to disable dsp clk");
+ }
+
+ return rc;
+
+}
+
+static int cam_vfe_camif_resource_start(
+ struct cam_isp_resource_node *camif_res)
+{
+ struct cam_vfe_mux_camif_data *rsrc_data;
+ uint32_t val = 0;
+ uint32_t epoch0_irq_mask;
+ uint32_t epoch1_irq_mask;
+ uint32_t computed_epoch_line_cfg;
+ uint32_t camera_hw_version = 0;
+ int rc = 0;
+
+ if (!camif_res) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+ return -EINVAL;
+ }
+
+ if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_ERR(CAM_ISP, "Error! Invalid camif res res_state:%d",
+ camif_res->res_state);
+ return -EINVAL;
+ }
+
+ rsrc_data = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+
+ /*config vfe core*/
+ val = (rsrc_data->pix_pattern <<
+ rsrc_data->reg_data->pixel_pattern_shift);
+ if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+ val |= (1 << rsrc_data->reg_data->extern_reg_update_shift);
+
+ if ((rsrc_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+ (rsrc_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+ /* DSP mode reg val is CAM_ISP_DSP_MODE - 1 */
+ val |= (((rsrc_data->dsp_mode - 1) &
+ rsrc_data->reg_data->dsp_mode_mask) <<
+ rsrc_data->reg_data->dsp_mode_shift);
+ val |= (0x1 << rsrc_data->reg_data->dsp_en_shift);
+ }
+
+ cam_io_w_mb(val, rsrc_data->mem_base + rsrc_data->common_reg->core_cfg);
+
+ CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d", camif_res->hw_intf->hw_idx,
+ val);
+
+ /* disable the CGC for stats */
+ cam_io_w_mb(0xFFFFFFFF, rsrc_data->mem_base +
+ rsrc_data->common_reg->module_ctrl[
+ CAM_VFE_TOP_VER2_MODULE_STATS]->cgc_ovd);
+
+ /* get the HW version */
+ rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Couldn't find HW version. rc: %d", rc);
+ return rc;
+ }
+
+ /* epoch config */
+ switch (camera_hw_version) {
+ case CAM_CPAS_TITAN_170_V100:
+ case CAM_CPAS_TITAN_170_V110:
+ case CAM_CPAS_TITAN_170_V120:
+ cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
+ rsrc_data->mem_base +
+ rsrc_data->camif_reg->epoch_irq);
+ break;
+ default:
+ epoch0_irq_mask = ((rsrc_data->last_line -
+ rsrc_data->first_line) / 2) +
+ rsrc_data->first_line;
+ epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg &
+ 0xFFFF;
+ computed_epoch_line_cfg = (epoch0_irq_mask << 16) |
+ epoch1_irq_mask;
+ cam_io_w_mb(computed_epoch_line_cfg,
+ rsrc_data->mem_base +
+ rsrc_data->camif_reg->epoch_irq);
+ CAM_DBG(CAM_ISP, "first_line: %u\n"
+ "last_line: %u\n"
+ "epoch_line_cfg: 0x%x",
+ rsrc_data->first_line,
+ rsrc_data->last_line,
+ computed_epoch_line_cfg);
+ break;
+ }
+
+ camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+ /* Reg Update */
+ cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
+ rsrc_data->mem_base + rsrc_data->camif_reg->reg_update_cmd);
+ CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d", camif_res->hw_intf->hw_idx,
+ rsrc_data->reg_data->reg_update_cmd_data);
+
+ /* disable sof irq debug flag */
+ rsrc_data->enable_sof_irq_debug = false;
+ rsrc_data->irq_debug_cnt = 0;
+
+ if (rsrc_data->camif_debug &
+ CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+ val = cam_io_r_mb(rsrc_data->mem_base +
+ rsrc_data->camif_reg->vfe_diag_config);
+ val |= rsrc_data->reg_data->enable_diagnostic_hw;
+ cam_io_w_mb(val, rsrc_data->mem_base +
+ rsrc_data->camif_reg->vfe_diag_config);
+ }
+
+ CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
+ return 0;
+}
+
+static int cam_vfe_camif_reg_dump(
+ struct cam_isp_resource_node *camif_res)
+{
+ struct cam_vfe_mux_camif_data *camif_priv;
+ struct cam_vfe_soc_private *soc_private;
+ int rc = 0, i;
+ uint32_t val = 0;
+
+ if (!camif_res) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+ return -EINVAL;
+ }
+
+ if ((camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
+ (camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE))
+ return 0;
+
+ camif_priv = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+ soc_private = camif_priv->soc_info->soc_private;
+ for (i = 0xA3C; i <= 0xA90; i += 4) {
+ val = cam_io_r_mb(camif_priv->mem_base + i);
+ CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+ }
+
+ for (i = 0xE0C; i <= 0xE3C; i += 4) {
+ val = cam_io_r_mb(camif_priv->mem_base + i);
+ CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+ }
+
+ for (i = 0x2000; i <= 0x20B8; i += 4) {
+ val = cam_io_r_mb(camif_priv->mem_base + i);
+ CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+ }
+
+ for (i = 0x2500; i <= 0x255C; i += 4) {
+ val = cam_io_r_mb(camif_priv->mem_base + i);
+ CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+ }
+
+ for (i = 0x2600; i <= 0x265C; i += 4) {
+ val = cam_io_r_mb(camif_priv->mem_base + i);
+ CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+ }
+
+ cam_cpas_reg_read(soc_private->cpas_handle,
+ CAM_CPAS_REG_CAMNOC, 0x420, true, &val);
+ CAM_INFO(CAM_ISP, "IFE02_MAXWR_LOW offset 0x420 val 0x%x", val);
+
+ cam_cpas_reg_read(soc_private->cpas_handle,
+ CAM_CPAS_REG_CAMNOC, 0x820, true, &val);
+ CAM_INFO(CAM_ISP, "IFE13_MAXWR_LOW offset 0x820 val 0x%x", val);
+
+ return rc;
+}
+
+static int cam_vfe_camif_reg_dump_bh(struct cam_vfe_mux_camif_data *camif_priv)
+{
+ uint32_t offset, val, wm_idx;
+
+ for (offset = 0x0; offset < 0x1000; offset += 0x4) {
+ val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
+ CAM_INFO(CAM_ISP, "offset 0x%x value 0x%x", offset, val);
+ }
+
+ for (offset = 0x2000; offset <= 0x20B8; offset += 0x4) {
+ val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
+ CAM_INFO(CAM_ISP, "offset 0x%x value 0x%x", offset, val);
+ }
+
+ for (wm_idx = 0; wm_idx <= 23; wm_idx++) {
+ for (offset = 0x2200 + 0x100 * wm_idx;
+ offset < 0x2278 + 0x100 * wm_idx; offset += 0x4) {
+ val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
+ CAM_INFO(CAM_ISP,
+ "offset 0x%x value 0x%x", offset, val);
+ }
+ }
+
+ offset = 0x420;
+ val = cam_soc_util_r(camif_priv->soc_info, 1, offset);
+ CAM_INFO(CAM_ISP, "CAMNOC IFE02 MaxWR_LOW offset 0x%x value 0x%x",
+ offset, val);
+
+ offset = 0x820;
+ val = cam_soc_util_r(camif_priv->soc_info, 1, offset);
+ CAM_INFO(CAM_ISP, "CAMNOC IFE13 MaxWR_LOW offset 0x%x value 0x%x",
+ offset, val);
+
+ return 0;
+}
+
+static int cam_vfe_camif_resource_stop(
+ struct cam_isp_resource_node *camif_res)
+{
+ struct cam_vfe_mux_camif_data *camif_priv;
+ struct cam_vfe_camif_ver2_reg *camif_reg;
+ int rc = 0;
+ uint32_t val = 0;
+
+ if (!camif_res) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+ return -EINVAL;
+ }
+
+ if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED ||
+ camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)
+ return 0;
+
+ camif_priv = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+ camif_reg = camif_priv->camif_reg;
+
+ if ((camif_priv->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+ (camif_priv->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+ val = cam_io_r_mb(camif_priv->mem_base +
+ camif_priv->common_reg->core_cfg);
+ val &= (~(1 << camif_priv->reg_data->dsp_en_shift));
+ cam_io_w_mb(val, camif_priv->mem_base +
+ camif_priv->common_reg->core_cfg);
+ }
+
+ if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+ camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+ val = cam_io_r_mb(camif_priv->mem_base +
+ camif_priv->camif_reg->vfe_diag_config);
+ if (val & camif_priv->reg_data->enable_diagnostic_hw) {
+ val &= ~camif_priv->reg_data->enable_diagnostic_hw;
+ cam_io_w_mb(val, camif_priv->mem_base +
+ camif_priv->camif_reg->vfe_diag_config);
+ }
+
+ return rc;
+}
+
+static int cam_vfe_camif_sof_irq_debug(
+ struct cam_isp_resource_node *rsrc_node, void *cmd_args)
+{
+ struct cam_vfe_mux_camif_data *camif_priv;
+ uint32_t *enable_sof_irq = (uint32_t *)cmd_args;
+
+ camif_priv =
+ (struct cam_vfe_mux_camif_data *)rsrc_node->res_priv;
+
+ if (*enable_sof_irq == 1)
+ camif_priv->enable_sof_irq_debug = true;
+ else
+ camif_priv->enable_sof_irq_debug = false;
+
+ return 0;
+}
+
+static int cam_vfe_camif_process_cmd(struct cam_isp_resource_node *rsrc_node,
+ uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+ int rc = -EINVAL;
+ struct cam_vfe_mux_camif_data *camif_priv = NULL;
+
+ if (!rsrc_node || !cmd_args) {
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
+ return -EINVAL;
+ }
+
+ switch (cmd_type) {
+ case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+ rc = cam_vfe_camif_get_reg_update(rsrc_node, cmd_args,
+ arg_size);
+ break;
+ case CAM_ISP_HW_CMD_GET_REG_DUMP:
+ rc = cam_vfe_camif_reg_dump(rsrc_node);
+ break;
+ case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG:
+ rc = cam_vfe_camif_sof_irq_debug(rsrc_node, cmd_args);
+ break;
+ case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG:
+ camif_priv =
+ (struct cam_vfe_mux_camif_data *)rsrc_node->res_priv;
+ camif_priv->camif_debug = *((uint32_t *)cmd_args);
+ break;
+ default:
+ CAM_ERR(CAM_ISP,
+ "unsupported process command:%d", cmd_type);
+ break;
+ }
+
+ return rc;
+}
+
+static int cam_vfe_camif_handle_irq_top_half(uint32_t evt_id,
+ struct cam_irq_th_payload *th_payload)
+{
+ return -EPERM;
+}
+
+/*
+ * Bottom-half IRQ handler for the CAMIF mux resource.
+ *
+ * Decodes the event in @evt_payload_priv against the captured IRQ
+ * status registers and reports whether the event is valid.
+ *
+ * Returns CAM_VFE_IRQ_STATUS_SUCCESS when the expected status bit is
+ * set, CAM_ISP_HW_ERROR_* for the error event, and
+ * CAM_VFE_IRQ_STATUS_ERR otherwise.
+ */
+static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv,
+	void *evt_payload_priv)
+{
+	int ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node *camif_node;
+	struct cam_vfe_mux_camif_data *camif_priv;
+	struct cam_vfe_top_irq_evt_payload *payload;
+	uint32_t irq_status0;
+	uint32_t irq_status1;
+	uint32_t val;
+
+	if (!handler_priv || !evt_payload_priv) {
+		CAM_ERR(CAM_ISP, "Invalid params");
+		return ret;
+	}
+
+	camif_node = handler_priv;
+	camif_priv = camif_node->res_priv;
+	payload = evt_payload_priv;
+	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
+
+	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
+
+	switch (payload->evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		if (irq_status0 & camif_priv->reg_data->sof_irq_mask) {
+			/*
+			 * SOF debug mode logs a limited number of SOFs at
+			 * INFO level, then disables itself.
+			 */
+			if ((camif_priv->enable_sof_irq_debug) &&
+				(camif_priv->irq_debug_cnt <=
+				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
+				CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+
+				camif_priv->irq_debug_cnt++;
+				if (camif_priv->irq_debug_cnt ==
+					CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
+					camif_priv->enable_sof_irq_debug =
+						false;
+					camif_priv->irq_debug_cnt = 0;
+				}
+			} else {
+				CAM_DBG(CAM_ISP, "Received SOF");
+			}
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received EPOCH");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_EOF:
+		if (irq_status0 & camif_priv->reg_data->eof_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received EOF\n");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_ERROR:
+		if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
+			CAM_DBG(CAM_ISP, "Received ERROR\n");
+			ret = CAM_ISP_HW_ERROR_OVERFLOW;
+			cam_vfe_camif_reg_dump(camif_node);
+			cam_vfe_camif_reg_dump_bh(camif_node->res_priv);
+		} else {
+			ret = CAM_ISP_HW_ERROR_NONE;
+		}
+
+		if (camif_priv->camif_debug &
+			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+			val = cam_io_r(camif_priv->mem_base +
+				camif_priv->camif_reg->vfe_diag_sensor_status);
+			/*
+			 * Fix: the format string has one specifier but two
+			 * args were passed (mem_base, val), printing the
+			 * register base instead of the diag value. Print
+			 * only val.
+			 */
+			CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
+				val);
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Fix: typo "returing" -> "returning" in the log message */
+	CAM_DBG(CAM_ISP, "returning status = %d", ret);
+	return ret;
+}
+
+/*
+ * Initialize the CAMIF mux resource: allocate the private mux data,
+ * wire the register/irq tables from @camif_hw_info, and install the
+ * resource ops on @camif_node.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+int cam_vfe_camif_ver2_init(
+	struct cam_hw_intf *hw_intf,
+	struct cam_hw_soc_info *soc_info,
+	void *camif_hw_info,
+	struct cam_isp_resource_node *camif_node)
+{
+	struct cam_vfe_mux_camif_data *camif_priv = NULL;
+	struct cam_vfe_camif_ver2_hw_info *camif_info = camif_hw_info;
+
+	camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_data),
+		GFP_KERNEL);
+	if (!camif_priv) {
+		/* Fix: allocation failure is an error, not a debug event */
+		CAM_ERR(CAM_ISP, "Error! Failed to alloc for camif_priv");
+		return -ENOMEM;
+	}
+
+	camif_node->res_priv = camif_priv;
+
+	camif_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+	camif_priv->camif_reg = camif_info->camif_reg;
+	camif_priv->common_reg = camif_info->common_reg;
+	camif_priv->reg_data = camif_info->reg_data;
+	camif_priv->hw_intf = hw_intf;
+	camif_priv->soc_info = soc_info;
+
+	camif_node->init = cam_vfe_camif_resource_init;
+	camif_node->deinit = cam_vfe_camif_resource_deinit;
+	camif_node->start = cam_vfe_camif_resource_start;
+	camif_node->stop = cam_vfe_camif_resource_stop;
+	camif_node->process_cmd = cam_vfe_camif_process_cmd;
+	camif_node->top_half_handler = cam_vfe_camif_handle_irq_top_half;
+	camif_node->bottom_half_handler = cam_vfe_camif_handle_irq_bottom_half;
+
+	return 0;
+}
+
+/*
+ * Deinitialize the CAMIF mux resource: detach resource ops and free
+ * the private data. Returns -ENODEV if res_priv was already NULL.
+ *
+ * NOTE(review): the init/deinit hooks installed by
+ * cam_vfe_camif_ver2_init() are not cleared here - confirm intended.
+ */
+int cam_vfe_camif_ver2_deinit(
+ struct cam_isp_resource_node *camif_node)
+{
+ struct cam_vfe_mux_camif_data *camif_priv = camif_node->res_priv;
+
+ camif_node->start = NULL;
+ camif_node->stop = NULL;
+ camif_node->process_cmd = NULL;
+ camif_node->top_half_handler = NULL;
+ camif_node->bottom_half_handler = NULL;
+
+ camif_node->res_priv = NULL;
+
+ if (!camif_priv) {
+ CAM_ERR(CAM_ISP, "Error! camif_priv is NULL");
+ return -ENODEV;
+ }
+
+ kfree(camif_priv);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
new file mode 100644
index 000000000000..7a6958930caa
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_CAMIF_VER2_H_
+#define _CAM_VFE_CAMIF_VER2_H_
+
+#include "cam_isp_hw.h"
+#include "cam_vfe_top.h"
+
+/*
+ * Debug values for camif module
+ */
+#define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS BIT(0)
+
+/*
+ * Register offsets for the ver2 CAMIF block, added to the VFE core
+ * register base at run time.
+ */
+struct cam_vfe_camif_ver2_reg {
+ uint32_t camif_cmd;
+ uint32_t camif_config;
+ uint32_t line_skip_pattern;
+ uint32_t pixel_skip_pattern;
+ uint32_t skip_period;
+ uint32_t irq_subsample_pattern;
+ uint32_t epoch_irq;
+ uint32_t raw_crop_width_cfg;
+ uint32_t raw_crop_height_cfg;
+ uint32_t reg_update_cmd;
+ uint32_t vfe_diag_config;
+ uint32_t vfe_diag_sensor_status;
+};
+
+/*
+ * Per-target CAMIF register data: field shifts/masks for config
+ * registers plus the IRQ status bit masks decoded by the bottom-half
+ * handler.
+ */
+struct cam_vfe_camif_reg_data {
+ uint32_t raw_crop_first_pixel_shift;
+ uint32_t raw_crop_first_pixel_mask;
+
+ uint32_t raw_crop_last_pixel_shift;
+ uint32_t raw_crop_last_pixel_mask;
+
+ uint32_t raw_crop_first_line_shift;
+ uint32_t raw_crop_first_line_mask;
+
+ uint32_t raw_crop_last_line_shift;
+ uint32_t raw_crop_last_line_mask;
+
+ uint32_t input_mux_sel_shift;
+ uint32_t input_mux_sel_mask;
+ uint32_t extern_reg_update_shift;
+ uint32_t extern_reg_update_mask;
+
+ uint32_t pixel_pattern_shift;
+ uint32_t pixel_pattern_mask;
+
+ uint32_t dsp_mode_shift;
+ uint32_t dsp_mode_mask;
+ uint32_t dsp_en_shift;
+ uint32_t dsp_en_mask;
+
+ uint32_t reg_update_cmd_data;
+ uint32_t epoch_line_cfg;
+ uint32_t sof_irq_mask;
+ uint32_t epoch0_irq_mask;
+ uint32_t reg_update_irq_mask;
+ uint32_t eof_irq_mask;
+ uint32_t error_irq_mask0;
+ uint32_t error_irq_mask1;
+
+ uint32_t enable_diagnostic_hw;
+};
+
+/* Aggregated hw info handed to cam_vfe_camif_ver2_init() */
+struct cam_vfe_camif_ver2_hw_info {
+ struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_camif_ver2_reg *camif_reg;
+ struct cam_vfe_camif_reg_data *reg_data;
+};
+
+int cam_vfe_camif_ver2_acquire_resource(
+ struct cam_isp_resource_node *camif_res,
+ void *acquire_param);
+
+int cam_vfe_camif_ver2_init(
+ struct cam_hw_intf *hw_intf,
+ struct cam_hw_soc_info *soc_info,
+ void *camif_hw_info,
+ struct cam_isp_resource_node *camif_node);
+
+int cam_vfe_camif_ver2_deinit(
+ struct cam_isp_resource_node *camif_node);
+
+#endif /* _CAM_VFE_CAMIF_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
new file mode 100644
index 000000000000..230698f7f890
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -0,0 +1,301 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "cam_vfe_rdi.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+
+/* Private data for one RDI input mux resource */
+struct cam_vfe_mux_rdi_data {
+ void __iomem *mem_base; /* VFE core register base */
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_rdi_ver2_reg *rdi_reg;
+ struct cam_vfe_rdi_reg_data *reg_data;
+
+ enum cam_isp_hw_sync_mode sync_mode; /* latched at acquire time */
+};
+
+/*
+ * Write the RDI reg-update command (one register/value pair) into
+ * the caller's CDM command buffer. cmd_args must be a
+ * cam_isp_hw_get_cmd_update; cmd.used_bytes is set on success.
+ *
+ * Returns 0 on success, -EINVAL on bad args or insufficient buffer.
+ */
+static int cam_vfe_rdi_get_reg_update(
+ struct cam_isp_resource_node *rdi_res,
+ void *cmd_args, uint32_t arg_size)
+{
+ uint32_t size = 0;
+ uint32_t reg_val_pair[2];
+ struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
+ struct cam_cdm_utils_ops *cdm_util_ops = NULL;
+ struct cam_vfe_mux_rdi_data *rsrc_data = NULL;
+
+ if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+ CAM_ERR(CAM_ISP, "Error - Invalid cmd size");
+ return -EINVAL;
+ }
+
+ if (!cdm_args || !cdm_args->res) {
+ CAM_ERR(CAM_ISP, "Error - Invalid args");
+ return -EINVAL;
+ }
+
+ cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+ if (!cdm_util_ops) {
+ CAM_ERR(CAM_ISP, "Error - Invalid CDM ops");
+ return -EINVAL;
+ }
+
+ size = cdm_util_ops->cdm_required_size_reg_random(1);
+ /* since cdm returns dwords, we need to convert it into bytes */
+ if ((size * 4) > cdm_args->cmd.size) {
+ CAM_ERR(CAM_ISP,
+ "Error - buf size:%d is not sufficient, expected: %d",
+ cdm_args->cmd.size, size * 4);
+ return -EINVAL;
+ }
+
+ rsrc_data = rdi_res->res_priv;
+ reg_val_pair[0] = rsrc_data->rdi_reg->reg_update_cmd;
+ reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data;
+ CAM_DBG(CAM_ISP, "RDI%d reg_update_cmd %x",
+ rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0, reg_val_pair[1]);
+
+ cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
+ 1, reg_val_pair);
+ cdm_args->cmd.used_bytes = size * 4;
+
+ return 0;
+}
+
+/*
+ * Acquire the RDI input resource: latch the requested sync mode from
+ * the acquire args into the private mux data.
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments.
+ */
+int cam_vfe_rdi_ver2_acquire_resource(
+	struct cam_isp_resource_node *rdi_res,
+	void *acquire_param)
+{
+	struct cam_vfe_mux_rdi_data *rdi_data;
+	struct cam_vfe_acquire_args *acquire_data;
+
+	/*
+	 * Robustness fix: validate inputs before dereferencing, matching
+	 * the other RDI resource ops in this file.
+	 */
+	if (!rdi_res || !rdi_res->res_priv || !acquire_param) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	rdi_data = (struct cam_vfe_mux_rdi_data *)rdi_res->res_priv;
+	acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
+
+	rdi_data->sync_mode = acquire_data->vfe_in.sync_mode;
+
+	return 0;
+}
+
+/*
+ * Start the RDI path: move a RESERVED resource to STREAMING and issue
+ * a reg-update so the programmed configuration takes effect.
+ *
+ * Returns 0 on success, -EINVAL if the resource is not RESERVED.
+ */
+static int cam_vfe_rdi_resource_start(
+ struct cam_isp_resource_node *rdi_res)
+{
+ struct cam_vfe_mux_rdi_data *rsrc_data;
+ int rc = 0;
+
+ if (!rdi_res) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+ return -EINVAL;
+ }
+
+ if (rdi_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_ERR(CAM_ISP, "Error! Invalid rdi res res_state:%d",
+ rdi_res->res_state);
+ return -EINVAL;
+ }
+
+ rsrc_data = (struct cam_vfe_mux_rdi_data *)rdi_res->res_priv;
+ rdi_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+ /* Reg Update */
+ cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
+ rsrc_data->mem_base + rsrc_data->rdi_reg->reg_update_cmd);
+
+ CAM_DBG(CAM_ISP, "Start RDI %d",
+ rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0);
+
+ return rc;
+}
+
+
+/*
+ * Stop the RDI path: a STREAMING resource is moved back to RESERVED.
+ * Stopping a resource that is already RESERVED or AVAILABLE is a
+ * no-op; both paths return success.
+ */
+static int cam_vfe_rdi_resource_stop(
+	struct cam_isp_resource_node *rdi_res)
+{
+	if (!rdi_res) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	switch (rdi_res->res_state) {
+	case CAM_ISP_RESOURCE_STATE_RESERVED:
+	case CAM_ISP_RESOURCE_STATE_AVAILABLE:
+		/* Not streaming: nothing to tear down */
+		break;
+	case CAM_ISP_RESOURCE_STATE_STREAMING:
+		rdi_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Dispatch an ISP HW command to the RDI resource. Only
+ * GET_REG_UPDATE is supported; anything else returns -EINVAL.
+ */
+static int cam_vfe_rdi_process_cmd(struct cam_isp_resource_node *rsrc_node,
+ uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+ int rc = -EINVAL;
+
+ if (!rsrc_node || !cmd_args) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+ return -EINVAL;
+ }
+
+ switch (cmd_type) {
+ case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+ rc = cam_vfe_rdi_get_reg_update(rsrc_node, cmd_args,
+ arg_size);
+ break;
+ default:
+ CAM_ERR(CAM_ISP,
+ "unsupported RDI process command:%d", cmd_type);
+ break;
+ }
+
+ return rc;
+}
+
+/* Top-half IRQ handler stub; always returns -EPERM. */
+static int cam_vfe_rdi_handle_irq_top_half(uint32_t evt_id,
+ struct cam_irq_th_payload *th_payload)
+{
+ return -EPERM;
+}
+
+/*
+ * Bottom-half IRQ handler for an RDI resource: validates the SOF or
+ * REG_UPDATE status bit for the reported event.
+ *
+ * Returns CAM_VFE_IRQ_STATUS_SUCCESS when the expected bit is set,
+ * CAM_VFE_IRQ_STATUS_ERR otherwise.
+ */
+static int cam_vfe_rdi_handle_irq_bottom_half(void *handler_priv,
+	void *evt_payload_priv)
+{
+	int ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node *rdi_node;
+	struct cam_vfe_mux_rdi_data *rdi_priv;
+	struct cam_vfe_top_irq_evt_payload *payload;
+	uint32_t irq_status0;
+
+	if (!handler_priv || !evt_payload_priv) {
+		CAM_ERR(CAM_ISP, "Invalid params");
+		return ret;
+	}
+
+	rdi_node = handler_priv;
+	rdi_priv = rdi_node->res_priv;
+	payload = evt_payload_priv;
+	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+
+	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
+
+	switch (payload->evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		if (irq_status0 & rdi_priv->reg_data->sof_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received SOF");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received REG UPDATE");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Fix: typo "returing" -> "returning" in the log message */
+	CAM_DBG(CAM_ISP, "returning status = %d", ret);
+	return ret;
+}
+
+/*
+ * Initialize an RDI input mux resource: allocate the private mux
+ * data, wire the register/irq data for the requested RDI instance
+ * and install the resource ops on @rdi_node.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL for
+ * an unsupported resource id.
+ */
+int cam_vfe_rdi_ver2_init(
+	struct cam_hw_intf *hw_intf,
+	struct cam_hw_soc_info *soc_info,
+	void *rdi_hw_info,
+	struct cam_isp_resource_node *rdi_node)
+{
+	struct cam_vfe_mux_rdi_data *rdi_priv = NULL;
+	struct cam_vfe_rdi_ver2_hw_info *rdi_info = rdi_hw_info;
+
+	rdi_priv = kzalloc(sizeof(struct cam_vfe_mux_rdi_data),
+		GFP_KERNEL);
+	if (!rdi_priv) {
+		/* Fix: allocation failure is an error, not a debug event */
+		CAM_ERR(CAM_ISP, "Error! Failed to alloc for rdi_priv");
+		return -ENOMEM;
+	}
+
+	rdi_node->res_priv = rdi_priv;
+
+	rdi_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+	rdi_priv->hw_intf = hw_intf;
+	rdi_priv->common_reg = rdi_info->common_reg;
+	rdi_priv->rdi_reg = rdi_info->rdi_reg;
+
+	switch (rdi_node->res_id) {
+	case CAM_ISP_HW_VFE_IN_RDI0:
+		rdi_priv->reg_data = rdi_info->reg_data[0];
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI1:
+		rdi_priv->reg_data = rdi_info->reg_data[1];
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI2:
+		rdi_priv->reg_data = rdi_info->reg_data[2];
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI3:
+		/* RDI3 reg data is optional in the hw info table */
+		if (rdi_info->reg_data[3]) {
+			rdi_priv->reg_data = rdi_info->reg_data[3];
+		} else {
+			CAM_ERR(CAM_ISP, "Error! RDI3 is not supported");
+			goto err_init;
+		}
+		break;
+	default:
+		/* Fix: an invalid resource id is an error, not debug */
+		CAM_ERR(CAM_ISP, "invalid Resource id:%d", rdi_node->res_id);
+		goto err_init;
+	}
+
+	rdi_node->start = cam_vfe_rdi_resource_start;
+	rdi_node->stop = cam_vfe_rdi_resource_stop;
+	rdi_node->process_cmd = cam_vfe_rdi_process_cmd;
+	rdi_node->top_half_handler = cam_vfe_rdi_handle_irq_top_half;
+	rdi_node->bottom_half_handler = cam_vfe_rdi_handle_irq_bottom_half;
+
+	return 0;
+err_init:
+	/*
+	 * Fix: clear the node's pointer before freeing so the error path
+	 * does not leave a dangling res_priv reference on @rdi_node.
+	 */
+	rdi_node->res_priv = NULL;
+	kfree(rdi_priv);
+	return -EINVAL;
+}
+
+/*
+ * Deinitialize an RDI mux resource: detach resource ops and free the
+ * private data. Returns -ENODEV if res_priv was already NULL.
+ */
+int cam_vfe_rdi_ver2_deinit(
+ struct cam_isp_resource_node *rdi_node)
+{
+ struct cam_vfe_mux_rdi_data *rdi_priv = rdi_node->res_priv;
+
+ rdi_node->start = NULL;
+ rdi_node->stop = NULL;
+ rdi_node->process_cmd = NULL;
+ rdi_node->top_half_handler = NULL;
+ rdi_node->bottom_half_handler = NULL;
+
+ rdi_node->res_priv = NULL;
+
+ if (!rdi_priv) {
+ CAM_ERR(CAM_ISP, "Error! rdi_priv NULL");
+ return -ENODEV;
+ }
+ kfree(rdi_priv);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
new file mode 100644
index 000000000000..a524b4496905
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_RDI_H_
+#define _CAM_VFE_RDI_H_
+
+#include "cam_isp_hw.h"
+#include "cam_vfe_top.h"
+
+/* Number of RDI paths addressable by the ver2 hw info tables */
+#define CAM_VFE_RDI_VER2_MAX 4
+
+/* Register offsets (from the VFE core base) for one RDI path */
+struct cam_vfe_rdi_ver2_reg {
+ uint32_t reg_update_cmd;
+};
+
+/* Per-RDI register command value and IRQ status bit masks */
+struct cam_vfe_rdi_reg_data {
+ uint32_t reg_update_cmd_data;
+ uint32_t sof_irq_mask;
+ uint32_t reg_update_irq_mask;
+};
+
+/* Hw info for all RDI paths; reg_data entries may be NULL (e.g. RDI3) */
+struct cam_vfe_rdi_ver2_hw_info {
+ struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_rdi_ver2_reg *rdi_reg;
+ struct cam_vfe_rdi_reg_data *reg_data[CAM_VFE_RDI_VER2_MAX];
+};
+
+int cam_vfe_rdi_ver2_acquire_resource(
+ struct cam_isp_resource_node *rdi_res,
+ void *acquire_param);
+
+int cam_vfe_rdi_ver2_init(
+ struct cam_hw_intf *hw_intf,
+ struct cam_hw_soc_info *soc_info,
+ void *rdi_hw_info,
+ struct cam_isp_resource_node *rdi_node);
+
+int cam_vfe_rdi_ver2_deinit(
+ struct cam_isp_resource_node *rdi_node);
+
+#endif /* _CAM_VFE_RDI_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
new file mode 100644
index 000000000000..6042eb483c63
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_debug_util.h"
+
+/*
+ * Create the VFE top block for the given hw version. Only
+ * CAM_VFE_TOP_VER_2_0 is supported; other versions return -EINVAL.
+ */
+int cam_vfe_top_init(uint32_t top_version,
+ struct cam_hw_soc_info *soc_info,
+ struct cam_hw_intf *hw_intf,
+ void *top_hw_info,
+ struct cam_vfe_top **vfe_top)
+{
+ int rc = -EINVAL;
+
+ switch (top_version) {
+ case CAM_VFE_TOP_VER_2_0:
+ rc = cam_vfe_top_ver2_init(soc_info, hw_intf, top_hw_info,
+ vfe_top);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * Destroy the VFE top block for the given hw version. Only
+ * CAM_VFE_TOP_VER_2_0 is supported; other versions return -EINVAL.
+ */
+int cam_vfe_top_deinit(uint32_t top_version,
+ struct cam_vfe_top **vfe_top)
+{
+ int rc = -EINVAL;
+
+ switch (top_version) {
+ case CAM_VFE_TOP_VER_2_0:
+ rc = cam_vfe_top_ver2_deinit(vfe_top);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
new file mode 100644
index 000000000000..f54f52af5334
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -0,0 +1,842 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "cam_io_util.h"
+#include "cam_cdm_util.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+#include "cam_vfe_soc.h"
+
+#define CAM_VFE_HW_RESET_HW_AND_REG_VAL 0x00003F9F
+#define CAM_VFE_HW_RESET_HW_VAL 0x00003F87
+#define CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES 3
+
+/* Handles shared by all top-level ops: soc/hw intf and common regs */
+struct cam_vfe_top_ver2_common_data {
+ struct cam_hw_soc_info *soc_info;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+};
+
+/* Private state of the ver2 VFE top block */
+struct cam_vfe_top_ver2_priv {
+ struct cam_vfe_top_ver2_common_data common_data;
+ struct cam_isp_resource_node mux_rsrc[CAM_VFE_TOP_VER2_MUX_MAX];
+ unsigned long hw_clk_rate; /* currently applied src clk rate */
+ struct cam_axi_vote applied_axi_vote; /* last vote sent to CPAS */
+ struct cam_axi_vote req_axi_vote[CAM_VFE_TOP_VER2_MUX_MAX];
+ unsigned long req_clk_rate[CAM_VFE_TOP_VER2_MUX_MAX];
+ /* Ring buffer of recent votes, used to delay BW reduction */
+ struct cam_axi_vote last_vote[CAM_VFE_TOP_VER2_MUX_MAX *
+ CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES];
+ uint32_t last_counter; /* ring write index */
+ enum cam_vfe_bw_control_action
+ axi_vote_control[CAM_VFE_TOP_VER2_MUX_MAX];
+};
+
+/*
+ * Write a CDM CHANGE_BASE command pointing at this VFE's register
+ * base into the caller's command buffer; cmd.used_bytes is set on
+ * success.
+ *
+ * Returns 0 on success, -EINVAL on bad args or insufficient buffer.
+ */
+static int cam_vfe_top_mux_get_base(struct cam_vfe_top_ver2_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	uint32_t size = 0;
+	uint32_t mem_base = 0;
+	struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
+	struct cam_cdm_utils_ops *cdm_util_ops = NULL;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+		CAM_ERR(CAM_ISP, "Error! Invalid cmd size");
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res || !top_priv ||
+		!top_priv->common_data.soc_info) {
+		CAM_ERR(CAM_ISP, "Error! Invalid args");
+		return -EINVAL;
+	}
+
+	cdm_util_ops =
+		(struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+	if (!cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Invalid CDM ops");
+		return -EINVAL;
+	}
+
+	size = cdm_util_ops->cdm_required_size_changebase();
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->cmd.size) {
+		/*
+		 * Fix: report the expected size in bytes (size * 4) to
+		 * match the comparison above, as the RDI variant does.
+		 */
+		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
+			cdm_args->cmd.size, size * 4);
+		return -EINVAL;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(
+		top_priv->common_data.soc_info, VFE_CORE_BASE_IDX);
+	CAM_DBG(CAM_ISP, "core %d mem_base 0x%x",
+		top_priv->common_data.soc_info->index, mem_base);
+
+	cdm_util_ops->cdm_write_changebase(
+		cdm_args->cmd.cmd_buf_addr, mem_base);
+	cdm_args->cmd.used_bytes = (size * 4);
+
+	return 0;
+}
+
+/*
+ * Apply the highest per-mux requested clock rate to the VFE source
+ * clock. No-op when the max already equals the applied rate.
+ *
+ * Returns 0 on success or the failure code from the clk framework.
+ */
+static int cam_vfe_top_set_hw_clk_rate(
+	struct cam_vfe_top_ver2_priv *top_priv)
+{
+	struct cam_hw_soc_info *soc_info = NULL;
+	int i, rc = 0;
+	unsigned long max_clk_rate = 0;
+
+	soc_info = top_priv->common_data.soc_info;
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		if (top_priv->req_clk_rate[i] > max_clk_rate)
+			max_clk_rate = top_priv->req_clk_rate[i];
+	}
+	if (max_clk_rate == top_priv->hw_clk_rate)
+		return 0;
+
+	/* Fix: max_clk_rate is unsigned long - print with %lu, not %llu */
+	CAM_DBG(CAM_ISP, "VFE: Clock name=%s idx=%d clk=%lu",
+		soc_info->clk_name[soc_info->src_clk_idx],
+		soc_info->src_clk_idx, max_clk_rate);
+
+	rc = cam_soc_util_set_src_clk_rate(soc_info, max_clk_rate);
+
+	if (!rc)
+		top_priv->hw_clk_rate = max_clk_rate;
+	else
+		CAM_ERR(CAM_ISP, "Set Clock rate failed, rc=%d", rc);
+
+	return rc;
+}
+
+/*
+ * Aggregate the per-mux AXI BW requests and vote the result to CPAS.
+ *
+ * The sum of all INCLUDEd mux votes is pushed into a history ring;
+ * on the normal (per-frame) path the maximum over the ring is voted
+ * so BW reductions are delayed by a few frames, while on the
+ * start/stop path (@start_stop true) the current sum is applied
+ * immediately and the history is reset. The CPAS vote is only issued
+ * when the value actually changes.
+ */
+static int cam_vfe_top_set_axi_bw_vote(
+ struct cam_vfe_top_ver2_priv *top_priv,
+ bool start_stop)
+{
+ struct cam_axi_vote sum = {0, 0};
+ struct cam_axi_vote to_be_applied_axi_vote = {0, 0};
+ int i, rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ top_priv->common_data.soc_info;
+ struct cam_vfe_soc_private *soc_private =
+ soc_info->soc_private;
+ bool apply_bw_update = false;
+
+ if (!soc_private) {
+ CAM_ERR(CAM_ISP, "Error soc_private NULL");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->axi_vote_control[i] ==
+ CAM_VFE_BW_CONTROL_INCLUDE) {
+ sum.uncompressed_bw +=
+ top_priv->req_axi_vote[i].uncompressed_bw;
+ sum.compressed_bw +=
+ top_priv->req_axi_vote[i].compressed_bw;
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "Updating BW from (%llu %llu) to (%llu %llu)",
+ top_priv->applied_axi_vote.uncompressed_bw,
+ top_priv->applied_axi_vote.compressed_bw,
+ sum.uncompressed_bw,
+ sum.compressed_bw);
+
+ top_priv->last_vote[top_priv->last_counter] = sum;
+ top_priv->last_counter = (top_priv->last_counter + 1) %
+ (CAM_VFE_TOP_VER2_MUX_MAX *
+ CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
+
+ if ((top_priv->applied_axi_vote.uncompressed_bw ==
+ sum.uncompressed_bw) &&
+ (top_priv->applied_axi_vote.compressed_bw ==
+ sum.compressed_bw)) {
+ CAM_DBG(CAM_ISP, "BW config unchanged %llu %llu",
+ top_priv->applied_axi_vote.uncompressed_bw,
+ top_priv->applied_axi_vote.compressed_bw);
+ return 0;
+ }
+
+ if (start_stop == true) {
+ /* need to vote current request immediately */
+ to_be_applied_axi_vote = sum;
+ /* Reset everything, we can start afresh */
+ memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
+ (CAM_VFE_TOP_VER2_MUX_MAX *
+ CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+ top_priv->last_counter = 0;
+ top_priv->last_vote[top_priv->last_counter] = sum;
+ top_priv->last_counter = (top_priv->last_counter + 1) %
+ (CAM_VFE_TOP_VER2_MUX_MAX *
+ CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
+ } else {
+ /*
+ * Find the max bw request in the last few frames. This is
+ * the bw that we want to vote to CPAS now.
+ */
+ for (i = 0; i < (CAM_VFE_TOP_VER2_MUX_MAX *
+ CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES); i++) {
+ if (to_be_applied_axi_vote.compressed_bw <
+ top_priv->last_vote[i].compressed_bw)
+ to_be_applied_axi_vote.compressed_bw =
+ top_priv->last_vote[i].compressed_bw;
+
+ if (to_be_applied_axi_vote.uncompressed_bw <
+ top_priv->last_vote[i].uncompressed_bw)
+ to_be_applied_axi_vote.uncompressed_bw =
+ top_priv->last_vote[i].uncompressed_bw;
+ }
+ }
+
+ if ((to_be_applied_axi_vote.uncompressed_bw !=
+ top_priv->applied_axi_vote.uncompressed_bw) ||
+ (to_be_applied_axi_vote.compressed_bw !=
+ top_priv->applied_axi_vote.compressed_bw))
+ apply_bw_update = true;
+
+ CAM_DBG(CAM_ISP, "apply_bw_update=%d", apply_bw_update);
+
+ if (apply_bw_update == true) {
+ rc = cam_cpas_update_axi_vote(
+ soc_private->cpas_handle,
+ &to_be_applied_axi_vote);
+ if (!rc) {
+ top_priv->applied_axi_vote.uncompressed_bw =
+ to_be_applied_axi_vote.uncompressed_bw;
+ top_priv->applied_axi_vote.compressed_bw =
+ to_be_applied_axi_vote.compressed_bw;
+ } else {
+ CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Record a clock-rate request for the mux resource named in cmd_args
+ * and, when the hw is already powered up, apply the new maximum rate
+ * immediately; otherwise the request is only cached.
+ */
+static int cam_vfe_top_clock_update(
+ struct cam_vfe_top_ver2_priv *top_priv,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_vfe_clock_update_args *clk_update = NULL;
+ struct cam_isp_resource_node *res = NULL;
+ struct cam_hw_info *hw_info = NULL;
+ int i, rc = 0;
+
+ clk_update =
+ (struct cam_vfe_clock_update_args *)cmd_args;
+ res = clk_update->node_res;
+
+ if (!res || !res->hw_intf->hw_priv) {
+ CAM_ERR(CAM_ISP, "Invalid input res %pK", res);
+ return -EINVAL;
+ }
+
+ hw_info = res->hw_intf->hw_priv;
+
+ if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+ res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+ CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+ res->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+ top_priv->req_clk_rate[i] = clk_update->clk_rate;
+ break;
+ }
+ }
+
+ if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_DBG(CAM_ISP,
+ "VFE:%d Not ready to set clocks yet :%d",
+ res->hw_intf->hw_idx,
+ hw_info->hw_state);
+ } else
+ rc = cam_vfe_top_set_hw_clk_rate(top_priv);
+
+ return rc;
+}
+
+/*
+ * Record an AXI BW request (camnoc/external bytes) for the mux
+ * resource named in cmd_args, mark it INCLUDEd in the aggregate
+ * vote, and re-vote if the hw is powered up.
+ */
+static int cam_vfe_top_bw_update(
+ struct cam_vfe_top_ver2_priv *top_priv,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_vfe_bw_update_args *bw_update = NULL;
+ struct cam_isp_resource_node *res = NULL;
+ struct cam_hw_info *hw_info = NULL;
+ int rc = 0;
+ int i;
+
+ bw_update = (struct cam_vfe_bw_update_args *)cmd_args;
+ res = bw_update->node_res;
+
+ if (!res || !res->hw_intf->hw_priv)
+ return -EINVAL;
+
+ hw_info = res->hw_intf->hw_priv;
+
+ if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+ res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+ CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+ res->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+ top_priv->req_axi_vote[i].uncompressed_bw =
+ bw_update->camnoc_bw_bytes;
+ top_priv->req_axi_vote[i].compressed_bw =
+ bw_update->external_bw_bytes;
+ top_priv->axi_vote_control[i] =
+ CAM_VFE_BW_CONTROL_INCLUDE;
+ break;
+ }
+ }
+
+ if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "VFE:%d Not ready to set BW yet :%d",
+ res->hw_intf->hw_idx,
+ hw_info->hw_state);
+ } else
+ rc = cam_vfe_top_set_axi_bw_vote(top_priv, false);
+
+ return rc;
+}
+
+/*
+ * Include or exclude a mux resource from the aggregate AXI vote and
+ * re-vote immediately (start/stop path) if the hw is powered up.
+ */
+static int cam_vfe_top_bw_control(
+ struct cam_vfe_top_ver2_priv *top_priv,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_vfe_bw_control_args *bw_ctrl = NULL;
+ struct cam_isp_resource_node *res = NULL;
+ struct cam_hw_info *hw_info = NULL;
+ int rc = 0;
+ int i;
+
+ bw_ctrl = (struct cam_vfe_bw_control_args *)cmd_args;
+ res = bw_ctrl->node_res;
+
+ if (!res || !res->hw_intf->hw_priv)
+ return -EINVAL;
+
+ hw_info = res->hw_intf->hw_priv;
+
+ if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+ res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+ CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+ res->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+ top_priv->axi_vote_control[i] = bw_ctrl->action;
+ break;
+ }
+ }
+
+ if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "VFE:%d Not ready to set BW yet :%d",
+ res->hw_intf->hw_idx,
+ hw_info->hw_state);
+ } else {
+ rc = cam_vfe_top_set_axi_bw_vote(top_priv, true);
+ }
+
+ return rc;
+}
+
+/*
+ * Forward a reg-update request to the acquired input mux resource.
+ * Returns -EINVAL when the resource has no process_cmd hook.
+ */
+static int cam_vfe_top_mux_get_reg_update(
+	struct cam_vfe_top_ver2_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_isp_hw_get_cmd_update *cmd_update = cmd_args;
+	struct cam_isp_resource_node *res = cmd_update->res;
+
+	if (!res->process_cmd)
+		return -EINVAL;
+
+	return res->process_cmd(res, CAM_ISP_HW_CMD_GET_REG_UPDATE,
+		cmd_args, arg_size);
+}
+
+/* Capability query is not implemented for the ver2 top. */
+int cam_vfe_top_get_hw_caps(void *device_priv,
+ void *get_hw_cap_args, uint32_t arg_size)
+{
+ return -EPERM;
+}
+
+/*
+ * Hw init: reset the cached clock rate so the next clock update is
+ * not skipped by the equal-rate check in set_hw_clk_rate().
+ */
+int cam_vfe_top_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size)
+{
+ struct cam_vfe_top_ver2_priv *top_priv = device_priv;
+
+ top_priv->hw_clk_rate = 0;
+
+ return 0;
+}
+
+/*
+ * Reset the VFE core. reset_core_args selects HW+REG or HW-only
+ * reset; all IRQs except the reset-done bit are masked first.
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments.
+ */
+int cam_vfe_top_reset(void *device_priv,
+	void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv *top_priv = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_vfe_top_ver2_reg_offset_common *reg_common = NULL;
+	uint32_t *reset_reg_args = reset_core_args;
+	uint32_t reset_reg_val;
+
+	if (!top_priv || !reset_reg_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	switch (*reset_reg_args) {
+	case CAM_VFE_HW_RESET_HW_AND_REG:
+		reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+		break;
+	default:
+		reset_reg_val = CAM_VFE_HW_RESET_HW_VAL;
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "reset reg value: %x", reset_reg_val);
+	soc_info = top_priv->common_data.soc_info;
+	reg_common = top_priv->common_data.common_reg;
+
+	/*
+	 * Mask all the IRQs except RESET.
+	 * Fix: use an unsigned literal; left-shifting signed 1 into the
+	 * sign bit (1 << 31) is undefined behavior in C.
+	 * NOTE(review): 0x5C is a hard-coded register offset, presumably
+	 * the IRQ mask register - confirm against the hw spec / consider
+	 * adding it to reg_common.
+	 */
+	cam_io_w_mb((1U << 31),
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + 0x5C);
+
+	/* Reset HW */
+	cam_io_w_mb(reset_reg_val,
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) +
+		reg_common->global_reset_cmd);
+
+	CAM_DBG(CAM_ISP, "Reset HW exit");
+	return 0;
+}
+
+/*
+ * Reserve the mux input resource matching the requested res_id.
+ * CAMIF / CAMIF_LITE get an extra acquire step to configure the mux;
+ * on success the resource moves to RESERVED and is returned through
+ * acquire_args->rsrc_node.
+ *
+ * Returns 0 on success, -EINVAL if no matching AVAILABLE resource is
+ * found or the mux acquire step fails.
+ */
+int cam_vfe_top_reserve(void *device_priv,
+ void *reserve_args, uint32_t arg_size)
+{
+ struct cam_vfe_top_ver2_priv *top_priv;
+ struct cam_vfe_acquire_args *args;
+ struct cam_vfe_hw_vfe_in_acquire_args *acquire_args;
+ uint32_t i;
+ int rc = -EINVAL;
+
+ if (!device_priv || !reserve_args) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+ return -EINVAL;
+ }
+
+ top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+ args = (struct cam_vfe_acquire_args *)reserve_args;
+ acquire_args = &args->vfe_in;
+
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->mux_rsrc[i].res_id == acquire_args->res_id &&
+ top_priv->mux_rsrc[i].res_state ==
+ CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+
+ if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
+ rc = cam_vfe_camif_ver2_acquire_resource(
+ &top_priv->mux_rsrc[i],
+ args);
+ if (rc)
+ break;
+ }
+
+ if (acquire_args->res_id ==
+ CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
+ rc = cam_vfe_camif_lite_ver2_acquire_resource(
+ &top_priv->mux_rsrc[i],
+ args);
+ if (rc)
+ break;
+ }
+
+ top_priv->mux_rsrc[i].cdm_ops = acquire_args->cdm_ops;
+ top_priv->mux_rsrc[i].tasklet_info = args->tasklet;
+ top_priv->mux_rsrc[i].res_state =
+ CAM_ISP_RESOURCE_STATE_RESERVED;
+ acquire_args->rsrc_node =
+ &top_priv->mux_rsrc[i];
+
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+
+}
+
+/*
+ * cam_vfe_top_release() - Return a reserved mux resource to AVAILABLE.
+ *
+ * Rejects resources that are not at least RESERVED (i.e. were never
+ * acquired), so double-release is reported rather than silently accepted.
+ */
+int cam_vfe_top_release(void *device_priv,
+	void *release_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv *top_priv;
+	struct cam_isp_resource_node *mux_res;
+
+	if (!device_priv || !release_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)release_args;
+
+	CAM_DBG(CAM_ISP, "Resource in state %d", mux_res->res_state);
+	if (mux_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Error! Resource in Invalid res_state :%d",
+			mux_res->res_state);
+		return -EINVAL;
+	}
+	mux_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+/*
+ * cam_vfe_top_start() - Start a mux input resource.
+ *
+ * Requires the VFE HW to be powered up; applies the aggregated clock rate
+ * and AXI bandwidth vote first, then invokes the resource's own start().
+ * Returns -EPERM when HW is off, -EINVAL if the resource has no start hook.
+ */
+int cam_vfe_top_start(void *device_priv,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv *top_priv;
+	struct cam_isp_resource_node *mux_res;
+	struct cam_hw_info *hw_info = NULL;
+	int rc = 0;
+
+	if (!device_priv || !start_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)start_args;
+	hw_info = (struct cam_hw_info *)mux_res->hw_intf->hw_priv;
+
+	if (hw_info->hw_state == CAM_HW_STATE_POWER_UP) {
+		rc = cam_vfe_top_set_hw_clk_rate(top_priv);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"set_hw_clk_rate failed, rc=%d", rc);
+			return rc;
+		}
+
+		rc = cam_vfe_top_set_axi_bw_vote(top_priv, true);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"set_axi_bw_vote failed, rc=%d", rc);
+			return rc;
+		}
+
+		if (mux_res->start) {
+			rc = mux_res->start(mux_res);
+		} else {
+			CAM_ERR(CAM_ISP,
+				"Invalid res id:%d", mux_res->res_id);
+			rc = -EINVAL;
+		}
+	} else {
+		CAM_ERR(CAM_ISP, "VFE HW not powered up");
+		rc = -EPERM;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_vfe_top_stop() - Stop a mux input resource (CAMIF, CAMIF-lite or
+ * RDI0..RDI3) and, on success, zero its clock/bandwidth requests so the
+ * aggregated votes no longer include it.
+ */
+int cam_vfe_top_stop(void *device_priv,
+	void *stop_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv *top_priv;
+	struct cam_isp_resource_node *mux_res;
+	struct cam_hw_info *hw_info = NULL;
+	int i, rc = 0;
+
+	if (!device_priv || !stop_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)stop_args;
+	hw_info = (struct cam_hw_info *)mux_res->hw_intf->hw_priv;
+
+	if ((mux_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) ||
+		(mux_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF_LITE) ||
+		((mux_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0) &&
+		(mux_res->res_id <= CAM_ISP_HW_VFE_IN_RDI3))) {
+		rc = mux_res->stop(mux_res);
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id);
+		return -EINVAL;
+	}
+
+	if (!rc) {
+		/* Drop this resource's contribution to the shared votes. */
+		for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+			if (top_priv->mux_rsrc[i].res_id == mux_res->res_id) {
+				top_priv->req_clk_rate[i] = 0;
+				top_priv->req_axi_vote[i].compressed_bw = 0;
+				top_priv->req_axi_vote[i].uncompressed_bw = 0;
+				top_priv->axi_vote_control[i] =
+					CAM_VFE_BW_CONTROL_EXCLUDE;
+				break;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/* Direct register read is not supported at the VFE top layer. */
+int cam_vfe_top_read(void *device_priv,
+	void *read_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/* Direct register write is not supported at the VFE top layer. */
+int cam_vfe_top_write(void *device_priv,
+	void *write_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/*
+ * cam_vfe_top_process_cmd() - Dispatch ISP HW commands (change-base,
+ * reg-update, clock/bandwidth update and BW control) to the matching
+ * VFE top helper. Unknown commands return -EINVAL.
+ */
+int cam_vfe_top_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_vfe_top_ver2_priv *top_priv;
+
+	if (!device_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid arguments");
+		return -EINVAL;
+	}
+	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
+		rc = cam_vfe_top_mux_get_base(top_priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+		rc = cam_vfe_top_mux_get_reg_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_CLOCK_UPDATE:
+		rc = cam_vfe_top_clock_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_BW_UPDATE:
+		rc = cam_vfe_top_bw_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_BW_CONTROL:
+		rc = cam_vfe_top_bw_control(top_priv, cmd_args, arg_size);
+		break;
+	default:
+		rc = -EINVAL;
+		CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_vfe_top_ver2_init() - Allocate and initialize the VFE top block.
+ *
+ * Allocates the cam_vfe_top wrapper and its private data, zeroes all
+ * clock/bandwidth bookkeeping, initializes one mux resource per entry in
+ * ver2_hw_info->mux_type (CAMIF, CAMIF-lite or RDI), and wires up the
+ * hw_ops table. On a mux init failure, already-initialized entries are
+ * rolled back and all allocations freed.
+ *
+ * @soc_info:    SOC/register-map info shared by the sub-blocks.
+ * @hw_intf:     Parent HW interface stored into each resource node.
+ * @top_hw_info: struct cam_vfe_top_ver2_hw_info describing this VFE.
+ * @vfe_top_ptr: Out param; receives the allocated top on success.
+ */
+int cam_vfe_top_ver2_init(
+	struct cam_hw_soc_info                 *soc_info,
+	struct cam_hw_intf                     *hw_intf,
+	void                                   *top_hw_info,
+	struct cam_vfe_top                    **vfe_top_ptr)
+{
+	int i, j, rc = 0;
+	struct cam_vfe_top_ver2_priv           *top_priv = NULL;
+	struct cam_vfe_top_ver2_hw_info        *ver2_hw_info = top_hw_info;
+	struct cam_vfe_top                     *vfe_top;
+
+	vfe_top = kzalloc(sizeof(struct cam_vfe_top), GFP_KERNEL);
+	if (!vfe_top) {
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	top_priv = kzalloc(sizeof(struct cam_vfe_top_ver2_priv),
+		GFP_KERNEL);
+	if (!top_priv) {
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top_priv");
+		rc = -ENOMEM;
+		goto free_vfe_top;
+	}
+	vfe_top->top_priv = top_priv;
+	top_priv->hw_clk_rate = 0;
+	top_priv->applied_axi_vote.compressed_bw = 0;
+	top_priv->applied_axi_vote.uncompressed_bw = 0;
+	memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
+		(CAM_VFE_TOP_VER2_MUX_MAX *
+		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+	top_priv->last_counter = 0;
+
+	/* j counts RDI instances so each gets a consecutive RDI res_id. */
+	for (i = 0, j = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
+		top_priv->mux_rsrc[i].hw_intf = hw_intf;
+		top_priv->mux_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		top_priv->req_clk_rate[i] = 0;
+		top_priv->req_axi_vote[i].compressed_bw = 0;
+		top_priv->req_axi_vote[i].uncompressed_bw = 0;
+		top_priv->axi_vote_control[i] = CAM_VFE_BW_CONTROL_EXCLUDE;
+
+
+		if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_CAMIF;
+
+			rc = cam_vfe_camif_ver2_init(hw_intf, soc_info,
+				&ver2_hw_info->camif_hw_info,
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				goto deinit_resources;
+		} else if (ver2_hw_info->mux_type[i] ==
+			CAM_VFE_CAMIF_LITE_VER_2_0) {
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_CAMIF_LITE;
+
+			rc = cam_vfe_camif_lite_ver2_init(hw_intf, soc_info,
+				&ver2_hw_info->camif_lite_hw_info,
+				&top_priv->mux_rsrc[i]);
+
+			if (rc)
+				goto deinit_resources;
+		} else if (ver2_hw_info->mux_type[i] ==
+			CAM_VFE_RDI_VER_1_0) {
+			/* set the RDI resource id */
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_RDI0 + j++;
+
+			rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info,
+				&ver2_hw_info->rdi_hw_info,
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				goto deinit_resources;
+		} else {
+			CAM_WARN(CAM_ISP, "Invalid mux type: %u",
+				ver2_hw_info->mux_type[i]);
+		}
+	}
+
+	vfe_top->hw_ops.get_hw_caps = cam_vfe_top_get_hw_caps;
+	vfe_top->hw_ops.init        = cam_vfe_top_init_hw;
+	vfe_top->hw_ops.reset       = cam_vfe_top_reset;
+	vfe_top->hw_ops.reserve     = cam_vfe_top_reserve;
+	vfe_top->hw_ops.release     = cam_vfe_top_release;
+	vfe_top->hw_ops.start       = cam_vfe_top_start;
+	vfe_top->hw_ops.stop        = cam_vfe_top_stop;
+	vfe_top->hw_ops.read        = cam_vfe_top_read;
+	vfe_top->hw_ops.write       = cam_vfe_top_write;
+	vfe_top->hw_ops.process_cmd = cam_vfe_top_process_cmd;
+	*vfe_top_ptr = vfe_top;
+
+	top_priv->common_data.soc_info     = soc_info;
+	top_priv->common_data.hw_intf      = hw_intf;
+	top_priv->common_data.common_reg   = ver2_hw_info->common_reg;
+
+	return rc;
+
+deinit_resources:
+	/* Roll back entries [0, i) that were successfully initialized. */
+	for (--i; i >= 0; i--) {
+		if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
+			if (cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "Camif Deinit failed");
+		} else if (ver2_hw_info->mux_type[i] ==
+			CAM_VFE_CAMIF_LITE_VER_2_0) {
+			if (cam_vfe_camif_lite_ver2_deinit(
+				&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "Camif lite deinit failed");
+		} else {
+			if (cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "RDI Deinit failed");
+		}
+		top_priv->mux_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	}
+
+	kfree(vfe_top->top_priv);
+free_vfe_top:
+	kfree(vfe_top);
+end:
+	return rc;
+}
+
+/*
+ * cam_vfe_top_ver2_deinit() - Tear down the VFE top and its mux resources,
+ * freeing the private data and the wrapper, and clearing *vfe_top_ptr.
+ *
+ * Fix: the original compared res_type against the mux-type constants
+ * (CAM_VFE_CAMIF_VER_2_0 etc.), but init sets res_type to
+ * CAM_ISP_RESOURCE_VFE_IN for every entry, so no deinit branch could ever
+ * match and the sub-block deinit routines were silently skipped. Dispatch
+ * on res_id (set per mux type during init, same scheme cam_vfe_top_stop
+ * uses) instead.
+ */
+int cam_vfe_top_ver2_deinit(struct cam_vfe_top  **vfe_top_ptr)
+{
+	int i, rc = 0;
+	struct cam_vfe_top_ver2_priv           *top_priv = NULL;
+	struct cam_vfe_top                     *vfe_top;
+
+	if (!vfe_top_ptr) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input");
+		return -EINVAL;
+	}
+
+	vfe_top = *vfe_top_ptr;
+	if (!vfe_top) {
+		CAM_ERR(CAM_ISP, "Error! vfe_top NULL");
+		return -ENODEV;
+	}
+
+	top_priv = vfe_top->top_priv;
+	if (!top_priv) {
+		CAM_ERR(CAM_ISP, "Error! vfe_top_priv NULL");
+		rc = -ENODEV;
+		goto free_vfe_top;
+	}
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		top_priv->mux_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+		if (top_priv->mux_rsrc[i].res_id ==
+			CAM_ISP_HW_VFE_IN_CAMIF) {
+			rc = cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]);
+			if (rc)
+				CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d",
+					rc);
+		} else if (top_priv->mux_rsrc[i].res_id ==
+			CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
+			rc = cam_vfe_camif_lite_ver2_deinit(
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				CAM_ERR(CAM_ISP,
+					"Camif lite deinit failed rc=%d", rc);
+		} else if ((top_priv->mux_rsrc[i].res_id >=
+			CAM_ISP_HW_VFE_IN_RDI0) &&
+			(top_priv->mux_rsrc[i].res_id <=
+			CAM_ISP_HW_VFE_IN_RDI3)) {
+			rc = cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]);
+			if (rc)
+				CAM_ERR(CAM_ISP, "RDI deinit failed rc=%d", rc);
+		}
+	}
+
+	kfree(vfe_top->top_priv);
+
+free_vfe_top:
+	kfree(vfe_top);
+	*vfe_top_ptr = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
new file mode 100644
index 000000000000..11ca78cd8a2e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
@@ -0,0 +1,68 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_TOP_VER2_H_
+#define _CAM_VFE_TOP_VER2_H_
+
+#include "cam_vfe_camif_ver2.h"
+#include "cam_vfe_camif_lite_ver2.h"
+#include "cam_vfe_rdi.h"
+
+#define CAM_VFE_TOP_VER2_MUX_MAX 5
+
+/* Top-level VFE module groups addressable through per-module ctrl regs. */
+enum cam_vfe_top_ver2_module_type {
+	CAM_VFE_TOP_VER2_MODULE_LENS,
+	CAM_VFE_TOP_VER2_MODULE_STATS,
+	CAM_VFE_TOP_VER2_MODULE_COLOR,
+	CAM_VFE_TOP_VER2_MODULE_ZOOM,
+	CAM_VFE_TOP_VER2_MODULE_MAX,
+};
+
+/* Register offsets for one module group's reset/CGC-override/enable ctrl. */
+struct cam_vfe_top_ver2_reg_offset_module_ctrl {
+	uint32_t reset;
+	uint32_t cgc_ovd;
+	uint32_t enable;
+};
+
+/*
+ * Common VFE top register offsets (relative to the VFE core base),
+ * plus per-module control register tables.
+ */
+struct cam_vfe_top_ver2_reg_offset_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t lens_feature;
+	uint32_t stats_feature;
+	uint32_t color_feature;
+	uint32_t zoom_feature;
+	uint32_t global_reset_cmd;
+	struct cam_vfe_top_ver2_reg_offset_module_ctrl
+		*module_ctrl[CAM_VFE_TOP_VER2_MODULE_MAX];
+	uint32_t bus_cgc_ovd;
+	uint32_t core_cfg;
+	uint32_t three_D_cfg;
+	uint32_t violation_status;
+	uint32_t reg_update_cmd;
+};
+
+/*
+ * Per-SoC VFE top description: common register map, sub-block HW info,
+ * and the mux type (CAMIF/CAMIF-lite/RDI) of each input slot.
+ */
+struct cam_vfe_top_ver2_hw_info {
+	struct cam_vfe_top_ver2_reg_offset_common  *common_reg;
+	struct cam_vfe_camif_ver2_hw_info  camif_hw_info;
+	struct cam_vfe_camif_lite_ver2_hw_info camif_lite_hw_info;
+	struct cam_vfe_rdi_ver2_hw_info rdi_hw_info;
+	uint32_t mux_type[CAM_VFE_TOP_VER2_MUX_MAX];
+};
+
+int cam_vfe_top_ver2_init(struct cam_hw_soc_info *soc_info,
+ struct cam_hw_intf *hw_intf,
+ void *top_hw_info,
+ struct cam_vfe_top **vfe_top);
+
+int cam_vfe_top_ver2_deinit(struct cam_vfe_top **vfe_top);
+
+#endif /* _CAM_VFE_TOP_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
new file mode 100644
index 000000000000..90c75291bd73
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_TOP_H_
+#define _CAM_VFE_TOP_H_
+
+#include "cam_hw_intf.h"
+#include "cam_isp_hw.h"
+
+#define CAM_VFE_TOP_VER_1_0 0x100000
+#define CAM_VFE_TOP_VER_2_0 0x200000
+
+#define CAM_VFE_CAMIF_VER_1_0 0x10
+#define CAM_VFE_CAMIF_VER_2_0 0x20
+
+#define CAM_VFE_CAMIF_LITE_VER_2_0 0x02
+
+#define CAM_VFE_RDI_VER_1_0 0x1000
+
+/* Version-agnostic VFE top handle: private data plus the ops table. */
+struct cam_vfe_top {
+	void                   *top_priv;
+	struct cam_hw_ops       hw_ops;
+};
+
+int cam_vfe_top_init(uint32_t top_version,
+ struct cam_hw_soc_info *soc_info,
+ struct cam_hw_intf *hw_intf,
+ void *top_hw_info,
+ struct cam_vfe_top **vfe_top);
+
+int cam_vfe_top_deinit(uint32_t top_version,
+ struct cam_vfe_top **vfe_top);
+
+#endif /* _CAM_VFE_TOP_H_*/
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/Makefile b/drivers/media/platform/msm/camera_v3/cam_jpeg/Makefile
new file mode 100644
index 000000000000..091e59100855
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg_dev.o cam_jpeg_context.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_context.c
new file mode 100644
index 000000000000..287d4a4162fc
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_context.c
@@ -0,0 +1,210 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cam_mem_mgr.h"
+#include "cam_sync_api.h"
+#include "cam_jpeg_context.h"
+#include "cam_context_utils.h"
+#include "cam_debug_util.h"
+#include "cam_packet_util.h"
+
+static const char jpeg_dev_name[] = "jpeg";
+
+/*
+ * cam_jpeg_context_dump_active_request() - Page-fault callback: walk the
+ * context's active request list and ask the HW layer to dump packet info
+ * for the faulting iova, reporting any request that owns the address.
+ *
+ * Fix: dropped the 'closest_port' local, which was assigned but never read.
+ */
+static int cam_jpeg_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info)
+{
+
+	struct cam_context *ctx = (struct cam_context *)data;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp = NULL;
+	struct cam_hw_mgr_dump_pf_data  *pf_dbg_entry = NULL;
+	int rc = 0;
+	bool b_mem_found = false;
+
+
+	if (!ctx) {
+		CAM_ERR(CAM_JPEG, "Invalid ctx");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_JPEG, "iommu fault for jpeg ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+			&ctx->active_req_list, list) {
+		pf_dbg_entry = &(req->pf_data);
+		CAM_INFO(CAM_JPEG, "req_id : %lld ", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &b_mem_found);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "Failed to dump pf info");
+
+		if (b_mem_found)
+			CAM_ERR(CAM_JPEG, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+	return rc;
+}
+
+/*
+ * Acquire handler for the Available state: acquire HW resources through
+ * the generic context helper and advance the context to ACQUIRED.
+ */
+static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unable to Acquire device %d", rc);
+	else
+		ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+/*
+ * Release handler for the Acquired state. The context is moved back to
+ * AVAILABLE even if the HW release fails, so the slot is never stranded.
+ */
+static int __cam_jpeg_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unable to release device %d", rc);
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+/*
+ * Flush handler for the Acquired state: delegate to the generic
+ * flush-to-HW helper.
+ *
+ * Fix: the error was logged with the CAM_ICP module tag; this is the JPEG
+ * driver, so log under CAM_JPEG.
+ */
+static int __cam_jpeg_ctx_flush_dev_in_acquired(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed to flush device");
+
+	return rc;
+}
+
+/* Config handler: prepare the packet and hand it to the HW manager. */
+static int __cam_jpeg_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	return cam_context_prepare_dev_to_hw(ctx, cmd);
+}
+
+/* IRQ handler: forward buf-done events to the generic context helper. */
+static int __cam_jpeg_ctx_handle_buf_done_in_acquired(void *ctx,
+	uint32_t evt_id, void *done)
+{
+	return cam_context_buf_done_from_hw(ctx, done, evt_id);
+}
+
+/* Stop handler for the Acquired state: stop HW via the generic helper. */
+static int __cam_jpeg_ctx_stop_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed in Stop dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * Top state machine: per-state ioctl/CRM/IRQ handler tables. JPEG contexts
+ * only distinguish Uninit, Available and Acquired; streaming is driven
+ * entirely from the Acquired state (config/flush/stop), with buf-done
+ * events and page-fault dumps also handled there.
+ */
+static struct cam_ctx_ops
+	cam_jpeg_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = { },
+		.crm_ops = { },
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_jpeg_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = { },
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_jpeg_ctx_release_dev_in_acquired,
+			.config_dev = __cam_jpeg_ctx_config_dev_in_acquired,
+			.stop_dev = __cam_jpeg_ctx_stop_dev_in_acquired,
+			.flush_dev = __cam_jpeg_ctx_flush_dev_in_acquired,
+		},
+		.crm_ops = { },
+		.irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
+		.pagefault_ops = cam_jpeg_context_dump_active_request,
+	},
+};
+
+/*
+ * cam_jpeg_context_init() - Initialize a JPEG context: zero it, link each
+ * preallocated request back to this context, initialize the generic base
+ * context, and install the JPEG state machine.
+ */
+int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id)
+{
+	int rc;
+	int i;
+
+	if (!ctx || !ctx_base) {
+		CAM_ERR(CAM_JPEG, "Invalid Context");
+		rc = -EFAULT;
+		goto err;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->base = ctx_base;
+
+	/* req_priv lets request handlers recover the owning JPEG context. */
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++)
+		ctx->req_base[i].req_priv = ctx;
+
+	rc = cam_context_init(ctx_base, jpeg_dev_name, CAM_JPEG, ctx_id,
+		NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Camera Context Base init failed");
+		goto err;
+	}
+
+	ctx_base->state_machine = cam_jpeg_ctx_state_machine;
+	ctx_base->ctx_priv = ctx;
+
+err:
+	return rc;
+}
+
+/*
+ * cam_jpeg_context_deinit() - Deinit the base context and scrub the JPEG
+ * context structure so stale pointers cannot be reused.
+ */
+int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx)
+{
+	if (!ctx || !ctx->base) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK", ctx);
+		return -EINVAL;
+	}
+
+	cam_context_deinit(ctx->base);
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_context.h b/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_context.h
new file mode 100644
index 000000000000..1a406793181b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_context.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_CONTEXT_H_
+#define _CAM_JPEG_CONTEXT_H_
+
+#include <uapi/media/cam_jpeg.h>
+
+#include "cam_context.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+
+#define CAM_JPEG_HW_EVENT_MAX 20
+
+/**
+ * struct cam_jpeg_context - Jpeg context
+ * @base: Base jpeg cam context object
+ * @req_base: Common request structure
+ */
+struct cam_jpeg_context {
+ struct cam_context *base;
+ struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+};
+
+/* cam jpeg context irq handling function type */
+typedef int (*cam_jpeg_hw_event_cb_func)(
+ struct cam_jpeg_context *ctx_jpeg,
+ void *evt_data);
+
+/**
+ * struct cam_jpeg_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops: Array of handle function pointers.
+ *
+ */
+struct cam_jpeg_ctx_irq_ops {
+ cam_jpeg_hw_event_cb_func irq_ops[CAM_JPEG_HW_EVENT_MAX];
+};
+
+/**
+ * cam_jpeg_context_init()
+ *
+ * @brief: Initialization function for the JPEG context
+ *
+ * @ctx: JPEG context obj to be initialized
+ * @ctx_base: Context base from cam_context
+ * @hw_intf: JPEG hw manager interface
+ * @ctx_id: ID for this context
+ *
+ */
+int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
+ struct cam_context *ctx_base,
+ struct cam_hw_mgr_intf *hw_intf,
+ uint32_t ctx_id);
+
+/**
+ * cam_jpeg_context_deinit()
+ *
+ * @brief: Deinitialize function for the JPEG context
+ *
+ * @ctx: JPEG context obj to be deinitialized
+ *
+ */
+int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx);
+
+#endif /* _CAM_JPEG_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_dev.c b/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_dev.c
new file mode 100644
index 000000000000..14892224e412
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_dev.c
@@ -0,0 +1,209 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/kernel.h>
+
+#include "cam_node.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_jpeg_dev.h"
+#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
+
+#define CAM_JPEG_DEV_NAME "cam-jpeg"
+
+static struct cam_jpeg_dev g_jpeg_dev;
+
+/*
+ * SMMU page-fault callback registered at probe: fan the fault out to every
+ * context on the node so the owning request can be identified and dumped.
+ */
+static void cam_jpeg_dev_iommu_fault_handler(
+	struct iommu_domain *domain, struct device *dev, unsigned long iova,
+	int flags, void *token, uint32_t buf_info)
+{
+	int i = 0;
+	struct cam_node *node = NULL;
+
+	if (!token) {
+		CAM_ERR(CAM_JPEG, "invalid token in page handler cb");
+		return;
+	}
+
+	node = (struct cam_node *)token;
+
+	for (i = 0; i < node->ctx_size; i++)
+		cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+			buf_info);
+}
+
+/* Devicetree match table: binds this driver to "qcom,cam-jpeg" nodes. */
+static const struct of_device_id cam_jpeg_dt_match[] = {
+	{
+		.compatible = "qcom,cam-jpeg"
+	},
+	{ }
+};
+
+/* Subdev open: count the opener under the device mutex. */
+static int cam_jpeg_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+
+	mutex_lock(&g_jpeg_dev.jpeg_mutex);
+	g_jpeg_dev.open_cnt++;
+	mutex_unlock(&g_jpeg_dev.jpeg_mutex);
+
+	return 0;
+}
+
+/*
+ * Subdev close: drop the open count under the device mutex and shut the
+ * node down when the last user goes away. Rejects unbalanced closes.
+ */
+static int cam_jpeg_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+
+	mutex_lock(&g_jpeg_dev.jpeg_mutex);
+	if (g_jpeg_dev.open_cnt <= 0) {
+		CAM_DBG(CAM_JPEG, "JPEG subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	g_jpeg_dev.open_cnt--;
+
+	if (!node) {
+		CAM_ERR(CAM_JPEG, "Node ptr is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (g_jpeg_dev.open_cnt == 0)
+		cam_node_shutdown(node);
+
+end:
+	mutex_unlock(&g_jpeg_dev.jpeg_mutex);
+	return rc;
+}
+
+/* v4l2 subdev internal ops: open/close bookkeeping for the JPEG node. */
+static const struct v4l2_subdev_internal_ops cam_jpeg_subdev_internal_ops = {
+	.close = cam_jpeg_subdev_close,
+	.open = cam_jpeg_subdev_open,
+};
+
+/*
+ * Platform remove: deinit all JPEG contexts (logging but continuing on
+ * per-context failure), then unregister the subdev.
+ */
+static int cam_jpeg_dev_remove(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "JPEG context %d deinit failed %d",
+				i, rc);
+	}
+
+	rc = cam_subdev_remove(&g_jpeg_dev.sd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unregister failed %d", rc);
+
+	return rc;
+}
+
+/*
+ * Platform probe: register the subdev, initialize the JPEG HW manager,
+ * create all contexts, initialize the node, and hook the SMMU page-fault
+ * handler. Failures unwind in reverse order of setup.
+ */
+static int cam_jpeg_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+	int iommu_hdl = -1;
+
+	g_jpeg_dev.sd.internal_ops = &cam_jpeg_subdev_internal_ops;
+	rc = cam_subdev_probe(&g_jpeg_dev.sd, pdev, CAM_JPEG_DEV_NAME,
+		CAM_JPEG_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "JPEG cam_subdev_probe failed %d", rc);
+		goto err;
+	}
+	node = (struct cam_node *)g_jpeg_dev.sd.token;
+
+	/*
+	 * NOTE(review): hw_mgr_intf is a stack local filled through a
+	 * uint64_t* cast; cam_node_init copies it into node->hw_mgr_intf,
+	 * which is what the contexts reference — confirm intf lifetime.
+	 */
+	rc = cam_jpeg_hw_mgr_init(pdev->dev.of_node,
+		(uint64_t *)&hw_mgr_intf, &iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Can not initialize JPEG HWmanager %d", rc);
+		goto unregister;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_jpeg_context_init(&g_jpeg_dev.ctx_jpeg[i],
+			&g_jpeg_dev.ctx[i],
+			&node->hw_mgr_intf,
+			i);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "JPEG context init failed %d %d",
+				i, rc);
+			goto ctx_init_fail;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_jpeg_dev.ctx, CAM_CTX_MAX,
+		CAM_JPEG_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "JPEG node init failed %d", rc);
+		goto ctx_init_fail;
+	}
+
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_jpeg_dev_iommu_fault_handler, node);
+
+	mutex_init(&g_jpeg_dev.jpeg_mutex);
+
+	CAM_INFO(CAM_JPEG, "Camera JPEG probe complete");
+
+	return rc;
+
+ctx_init_fail:
+	/* Unwind only the contexts that were successfully initialized. */
+	for (--i; i >= 0; i--)
+		if (cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]))
+			CAM_ERR(CAM_JPEG, "deinit fail %d %d", i, rc);
+unregister:
+	if (cam_subdev_remove(&g_jpeg_dev.sd))
+		CAM_ERR(CAM_JPEG, "remove fail %d", rc);
+err:
+	return rc;
+}
+
+/* Platform driver binding; suppress_bind_attrs blocks sysfs unbind. */
+static struct platform_driver jpeg_driver = {
+	.probe = cam_jpeg_dev_probe,
+	.remove = cam_jpeg_dev_remove,
+	.driver = {
+		.name = "cam_jpeg",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_jpeg_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+/* Module entry: register the JPEG platform driver. */
+static int __init cam_jpeg_dev_init_module(void)
+{
+	return platform_driver_register(&jpeg_driver);
+}
+
+/* Module exit: unregister the JPEG platform driver. */
+static void __exit cam_jpeg_dev_exit_module(void)
+{
+	platform_driver_unregister(&jpeg_driver);
+}
+
+module_init(cam_jpeg_dev_init_module);
+module_exit(cam_jpeg_dev_exit_module);
+MODULE_DESCRIPTION("MSM JPEG driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_dev.h b/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_dev.h
new file mode 100644
index 000000000000..0d15ced16e80
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/cam_jpeg_dev.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_DEV_H_
+#define _CAM_JPEG_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_jpeg_context.h"
+
+/**
+ * struct cam_jpeg_dev - Camera JPEG V4l2 device node
+ *
+ * @sd: Commone camera subdevice node
+ * @node: Pointer to jpeg subdevice
+ * @ctx: JPEG base context storage
+ * @ctx_jpeg: JPEG private context storage
+ * @jpeg_mutex: Jpeg dev mutex
+ * @open_cnt: Open device count
+ */
+struct cam_jpeg_dev {
+ struct cam_subdev sd;
+ struct cam_node *node;
+ struct cam_context ctx[CAM_CTX_MAX];
+ struct cam_jpeg_context ctx_jpeg[CAM_CTX_MAX];
+ struct mutex jpeg_mutex;
+ int32_t open_cnt;
+};
+#endif /* _CAM_JPEG_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/Makefile
new file mode 100644
index 000000000000..99b73b264255
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_enc_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_dma_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
new file mode 100644
index 000000000000..24bb154f922c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -0,0 +1,1614 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/debugfs.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_debug_util.h"
+#include "cam_common_util.h"
+
+#define CAM_JPEG_HW_ENTRIES_MAX 20
+#define CAM_JPEG_CHBASE 0
+#define CAM_JPEG_CFG 1
+#define CAM_JPEG_PARAM 2
+
+static struct cam_jpeg_hw_mgr g_jpeg_hw_mgr;
+
+static int32_t cam_jpeg_hw_mgr_cb(uint32_t irq_status,
+ int32_t result_size, void *data);
+static int cam_jpeg_mgr_process_cmd(void *priv, void *data);
+
+static int cam_jpeg_mgr_process_irq(void *priv, void *data)
+{
+ int rc = 0;
+ struct cam_jpeg_process_irq_work_data_t *task_data;
+ struct cam_jpeg_hw_mgr *hw_mgr;
+ int32_t i;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+ struct cam_hw_done_event_data buf_data;
+ struct cam_jpeg_set_irq_cb irq_cb;
+ uintptr_t dev_type = 0;
+ uintptr_t kaddr;
+ uint32_t *cmd_buf_kaddr;
+ size_t cmd_buf_len;
+ struct cam_jpeg_config_inout_param_info *p_params;
+ struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+ struct crm_workq_task *task;
+ struct cam_jpeg_process_frame_work_data_t *wq_task_data;
+
+ if (!data || !priv) {
+ CAM_ERR(CAM_JPEG, "Invalid data");
+ return -EINVAL;
+ }
+
+ task_data = data;
+ hw_mgr = &g_jpeg_hw_mgr;
+
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)task_data->data;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ return -EINVAL;
+ }
+
+ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+ mutex_lock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+
+ p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+
+ if (hw_mgr->device_in_use[dev_type][0] == false ||
+ p_cfg_req == NULL) {
+ CAM_ERR(CAM_JPEG, "irq for old request %d", rc);
+ mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+ return -EINVAL;
+ }
+
+ irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+ irq_cb.data = NULL;
+ irq_cb.b_set_cb = false;
+ if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+ CAM_ERR(CAM_JPEG, "process_cmd null ");
+ mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+ return -EINVAL;
+ }
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ CAM_JPEG_CMD_SET_IRQ_CB,
+ &irq_cb, sizeof(irq_cb));
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "CMD_SET_IRQ_CB failed %d", rc);
+ mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+ return rc;
+ }
+
+ if (hw_mgr->devices[dev_type][0]->hw_ops.deinit) {
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.deinit(
+ hw_mgr->devices[dev_type][0]->hw_priv, NULL, 0);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "Failed to Deinit %lu HW", dev_type);
+ }
+
+ hw_mgr->device_in_use[dev_type][0] = false;
+ hw_mgr->dev_hw_cfg_args[dev_type][0] = NULL;
+ mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+
+ task = cam_req_mgr_workq_get_task(
+ g_jpeg_hw_mgr.work_process_frame);
+ if (!task) {
+ CAM_ERR(CAM_JPEG, "no empty task");
+ return -EINVAL;
+ }
+
+ wq_task_data = (struct cam_jpeg_process_frame_work_data_t *)
+ task->payload;
+ if (!task_data) {
+ CAM_ERR(CAM_JPEG, "task_data is NULL");
+ return -EINVAL;
+ }
+ wq_task_data->data = (void *)dev_type;
+ wq_task_data->request_id = 0;
+ wq_task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_jpeg_mgr_process_cmd;
+ rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "could not enque task %d", rc);
+ return rc;
+ }
+
+ rc = cam_mem_get_cpu_buf(
+ p_cfg_req->hw_cfg_args.hw_update_entries[CAM_JPEG_PARAM].handle,
+ &kaddr, &cmd_buf_len);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "unable to get info for cmd buf: %x %d",
+ hw_mgr->iommu_hdl, rc);
+ return rc;
+ }
+
+ cmd_buf_kaddr = (uint32_t *)kaddr;
+
+ cmd_buf_kaddr =
+ (cmd_buf_kaddr +
+ (p_cfg_req->hw_cfg_args.hw_update_entries[CAM_JPEG_PARAM].offset
+ / sizeof(uint32_t)));
+
+ p_params = (struct cam_jpeg_config_inout_param_info *)cmd_buf_kaddr;
+
+ p_params->output_size = task_data->result_size;
+ CAM_DBG(CAM_JPEG, "Encoded Size %d", task_data->result_size);
+
+ buf_data.num_handles =
+ p_cfg_req->hw_cfg_args.num_out_map_entries;
+ for (i = 0; i < buf_data.num_handles; i++) {
+ buf_data.resource_handle[i] =
+ p_cfg_req->hw_cfg_args.out_map_entries[i].resource_handle;
+ }
+ buf_data.request_id =
+ PTR_TO_U64(p_cfg_req->hw_cfg_args.priv);
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+
+ list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
+
+ return rc;
+}
+
+static int cam_jpeg_hw_mgr_cb(
+ uint32_t irq_status, int32_t result_size, void *data)
+{
+ int32_t rc;
+ unsigned long flags;
+ struct cam_jpeg_hw_mgr *hw_mgr = &g_jpeg_hw_mgr;
+ struct crm_workq_task *task;
+ struct cam_jpeg_process_irq_work_data_t *task_data;
+
+ spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+ task = cam_req_mgr_workq_get_task(
+ g_jpeg_hw_mgr.work_process_irq_cb);
+ if (!task) {
+ CAM_ERR(CAM_JPEG, "no empty task");
+ spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+ return -ENOMEM;
+ }
+
+ task_data = (struct cam_jpeg_process_irq_work_data_t *)task->payload;
+ task_data->data = data;
+ task_data->irq_status = irq_status;
+ task_data->result_size = result_size;
+ task_data->type = CAM_JPEG_WORKQ_TASK_MSG_TYPE;
+ task->process_cb = cam_jpeg_mgr_process_irq;
+
+ rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+ return rc;
+}
+
+static int cam_jpeg_mgr_get_free_ctx(struct cam_jpeg_hw_mgr *hw_mgr)
+{
+ int i = 0;
+ int num_ctx = CAM_JPEG_CTX_MAX;
+
+ for (i = 0; i < num_ctx; i++) {
+ mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
+ if (hw_mgr->ctx_data[i].in_use == false) {
+ hw_mgr->ctx_data[i].in_use = true;
+ mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+ break;
+ }
+ mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+ }
+
+ return i;
+}
+
+
+static int cam_jpeg_mgr_release_ctx(
+ struct cam_jpeg_hw_mgr *hw_mgr, struct cam_jpeg_hw_ctx_data *ctx_data)
+{
+ if (!ctx_data) {
+ CAM_ERR(CAM_JPEG, "invalid ctx_data %pK", ctx_data);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx_data->ctx_mutex);
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is already un-used: %pK", ctx_data);
+ mutex_unlock(&ctx_data->ctx_mutex);
+ return -EINVAL;
+ }
+
+ ctx_data->in_use = false;
+ mutex_unlock(&ctx_data->ctx_mutex);
+
+ return 0;
+}
+
+static int cam_jpeg_insert_cdm_change_base(
+ struct cam_hw_config_args *config_args,
+ struct cam_jpeg_hw_ctx_data *ctx_data,
+ struct cam_jpeg_hw_mgr *hw_mgr)
+{
+ int rc;
+ uint32_t dev_type;
+ struct cam_cdm_bl_request *cdm_cmd;
+ uint32_t size;
+ uint32_t mem_cam_base;
+ uintptr_t iova_addr;
+ uint32_t *ch_base_iova_addr;
+ size_t ch_base_len;
+
+ rc = cam_mem_get_cpu_buf(
+ config_args->hw_update_entries[CAM_JPEG_CHBASE].handle,
+ &iova_addr, &ch_base_len);
+ if (rc) {
+ CAM_ERR(CAM_JPEG,
+ "unable to get src buf info for cmd buf: %d", rc);
+ return rc;
+ }
+ CAM_DBG(CAM_JPEG, "iova %pK len %zu offset %d",
+ (void *)iova_addr, ch_base_len,
+ config_args->hw_update_entries[CAM_JPEG_CHBASE].offset);
+ ch_base_iova_addr = (uint32_t *)iova_addr;
+ ch_base_iova_addr = (ch_base_iova_addr +
+ (config_args->hw_update_entries[CAM_JPEG_CHBASE].offset /
+ sizeof(uint32_t)));
+
+ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+ mem_cam_base = hw_mgr->cdm_reg_map[dev_type][0]->mem_cam_base;
+ size =
+ hw_mgr->cdm_info[dev_type][0].cdm_ops->cdm_required_size_changebase();
+ hw_mgr->cdm_info[dev_type][0].cdm_ops->cdm_write_changebase(
+ ch_base_iova_addr, mem_cam_base);
+
+ cdm_cmd = ctx_data->cdm_cmd;
+ cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].bl_addr.mem_handle =
+ config_args->hw_update_entries[CAM_JPEG_CHBASE].handle;
+ cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].offset =
+ config_args->hw_update_entries[CAM_JPEG_CHBASE].offset;
+ cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].len = size * sizeof(uint32_t);
+ cdm_cmd->cmd_arrary_count++;
+
+ ch_base_iova_addr += size;
+ *ch_base_iova_addr = 0;
+ ch_base_iova_addr += size;
+ *ch_base_iova_addr = 0;
+
+ return rc;
+}
+
+static int cam_jpeg_mgr_process_cmd(void *priv, void *data)
+{
+ int rc;
+ int i = 0;
+ struct cam_jpeg_hw_mgr *hw_mgr = priv;
+ struct cam_hw_update_entry *cmd;
+ struct cam_cdm_bl_request *cdm_cmd;
+ struct cam_hw_config_args *config_args = NULL;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+ uintptr_t request_id = 0;
+ struct cam_jpeg_process_frame_work_data_t *task_data =
+ (struct cam_jpeg_process_frame_work_data_t *)data;
+ uint32_t dev_type;
+ struct cam_jpeg_set_irq_cb irq_cb;
+ struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+ struct cam_hw_done_event_data buf_data;
+ struct cam_hw_config_args *hw_cfg_args = NULL;
+
+ if (!hw_mgr || !task_data) {
+ CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
+ hw_mgr, task_data);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+ if (list_empty(&hw_mgr->hw_config_req_list)) {
+ CAM_DBG(CAM_JPEG, "no available request");
+ rc = -EFAULT;
+ goto end;
+ }
+
+ p_cfg_req = list_first_entry(&hw_mgr->hw_config_req_list,
+ struct cam_jpeg_hw_cfg_req, list);
+ if (!p_cfg_req) {
+ CAM_ERR(CAM_JPEG, "no request");
+ rc = -EFAULT;
+ goto end;
+ }
+
+ if (false == hw_mgr->device_in_use[p_cfg_req->dev_type][0]) {
+ hw_mgr->device_in_use[p_cfg_req->dev_type][0] = true;
+ hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = p_cfg_req;
+ list_del_init(&p_cfg_req->list);
+ } else {
+ CAM_DBG(CAM_JPEG, "Not dequeing, just return");
+ rc = -EFAULT;
+ goto end;
+ }
+
+ config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args;
+ request_id = task_data->request_id;
+ if (request_id != (uintptr_t)config_args->priv) {
+ CAM_DBG(CAM_JPEG, "not a recent req %zd %zd",
+ request_id, (uintptr_t)config_args->priv);
+ }
+
+ if (!config_args->num_hw_update_entries) {
+ CAM_ERR(CAM_JPEG, "No hw update enteries are available");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ rc = -EINVAL;
+ goto end_unusedev;
+ }
+
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ rc = -EINVAL;
+ goto end_unusedev;
+ }
+
+ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+ if (dev_type != p_cfg_req->dev_type)
+ CAM_WARN(CAM_JPEG, "dev types not same something wrong");
+
+ if (!hw_mgr->devices[dev_type][0]->hw_ops.init) {
+ CAM_ERR(CAM_JPEG, "hw op init null ");
+ rc = -EFAULT;
+ goto end;
+ }
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.init(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ ctx_data,
+ sizeof(ctx_data));
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "Failed to Init %d HW", dev_type);
+ goto end;
+ }
+
+ irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+ irq_cb.data = (void *)ctx_data;
+ irq_cb.b_set_cb = true;
+ if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+ CAM_ERR(CAM_JPEG, "op process_cmd null ");
+ rc = -EFAULT;
+ goto end_callcb;
+ }
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ CAM_JPEG_CMD_SET_IRQ_CB,
+ &irq_cb, sizeof(irq_cb));
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "SET_IRQ_CB failed %d", rc);
+ goto end_callcb;
+ }
+
+ if (!hw_mgr->devices[dev_type][0]->hw_ops.reset) {
+ CAM_ERR(CAM_JPEG, "op reset null ");
+ rc = -EFAULT;
+ goto end_callcb;
+ }
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.reset(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "jpeg hw reset failed %d", rc);
+ goto end_callcb;
+ }
+
+ cdm_cmd = ctx_data->cdm_cmd;
+ cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+ cdm_cmd->flag = false;
+ cdm_cmd->userdata = NULL;
+ cdm_cmd->cookie = 0;
+ cdm_cmd->cmd_arrary_count = 0;
+
+ rc = cam_jpeg_insert_cdm_change_base(config_args,
+ ctx_data, hw_mgr);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "insert change base failed %d", rc);
+ goto end_callcb;
+ }
+
+ CAM_DBG(CAM_JPEG, "num hw up %d", config_args->num_hw_update_entries);
+ for (i = CAM_JPEG_CFG; i < (config_args->num_hw_update_entries - 1);
+ i++) {
+ cmd = (config_args->hw_update_entries + i);
+ cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].bl_addr.mem_handle
+ = cmd->handle;
+ cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].offset =
+ cmd->offset;
+ cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].len =
+ cmd->len;
+ CAM_DBG(CAM_JPEG, "i %d entry h %d o %d l %d",
+ i, cmd->handle, cmd->offset, cmd->len);
+ cdm_cmd->cmd_arrary_count++;
+ }
+
+ rc = cam_cdm_submit_bls(
+ hw_mgr->cdm_info[dev_type][0].cdm_handle, cdm_cmd);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "Failed to apply the configs %d", rc);
+ goto end_callcb;
+ }
+
+ if (!hw_mgr->devices[dev_type][0]->hw_ops.start) {
+ CAM_ERR(CAM_JPEG, "op start null ");
+ rc = -EINVAL;
+ goto end_callcb;
+ }
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.start(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "Failed to start hw %d",
+ rc);
+ goto end_callcb;
+ }
+
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return rc;
+
+end_callcb:
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ if (p_cfg_req) {
+ hw_cfg_args = &p_cfg_req->hw_cfg_args;
+ buf_data.num_handles =
+ hw_cfg_args->num_out_map_entries;
+ for (i = 0; i < buf_data.num_handles; i++) {
+ buf_data.resource_handle[i] =
+ hw_cfg_args->out_map_entries[i].resource_handle;
+ }
+ buf_data.request_id =
+ (uintptr_t)p_cfg_req->hw_cfg_args.priv;
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+ }
+
+end_unusedev:
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ hw_mgr->device_in_use[p_cfg_req->dev_type][0] = false;
+ hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = NULL;
+
+end:
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return rc;
+}
+
+static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
+{
+ int rc;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_config_args *config_args = config_hw_args;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+ uintptr_t request_id = 0;
+ struct cam_hw_update_entry *hw_update_entries;
+ struct crm_workq_task *task;
+ struct cam_jpeg_process_frame_work_data_t *task_data;
+ struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+
+ if (!hw_mgr || !config_args) {
+ CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
+ hw_mgr, config_args);
+ return -EINVAL;
+ }
+
+ if (!config_args->num_hw_update_entries) {
+ CAM_ERR(CAM_JPEG, "No hw update enteries are available");
+ return -EINVAL;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+
+ if (list_empty(&hw_mgr->free_req_list)) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ CAM_ERR(CAM_JPEG, "list empty");
+ return -ENOMEM;
+ }
+
+ p_cfg_req = list_first_entry(&hw_mgr->free_req_list,
+ struct cam_jpeg_hw_cfg_req, list);
+ list_del_init(&p_cfg_req->list);
+
+ /* Update Currently Processing Config Request */
+ p_cfg_req->hw_cfg_args = *config_args;
+ p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+ request_id = (uintptr_t)config_args->priv;
+ p_cfg_req->req_id = request_id;
+ hw_update_entries = config_args->hw_update_entries;
+ CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %lld %zd",
+ ctx_data, request_id, (uintptr_t)config_args->priv);
+ task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_frame);
+ if (!task) {
+ CAM_ERR(CAM_JPEG, "no empty task");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ rc = -ENOMEM;
+ goto err_after_dq_free_list;
+ }
+
+
+ task_data = (struct cam_jpeg_process_frame_work_data_t *)
+ task->payload;
+ if (!task_data) {
+ CAM_ERR(CAM_JPEG, "task_data is NULL");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ rc = -EINVAL;
+ goto err_after_dq_free_list;
+ }
+ CAM_DBG(CAM_JPEG, "cfge %pK num %d",
+ p_cfg_req->hw_cfg_args.hw_update_entries,
+ p_cfg_req->hw_cfg_args.num_hw_update_entries);
+
+ list_add_tail(&p_cfg_req->list, &hw_mgr->hw_config_req_list);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ task_data->data = (void *)(uintptr_t)p_cfg_req->dev_type;
+ task_data->request_id = request_id;
+ task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_jpeg_mgr_process_cmd;
+
+ rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "failed to enqueue task %d", rc);
+ goto err_after_get_task;
+ }
+
+ return rc;
+
+err_after_get_task:
+ list_del_init(&p_cfg_req->list);
+err_after_dq_free_list:
+ list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
+
+ return rc;
+}
+
+static void cam_jpeg_mgr_print_io_bufs(struct cam_packet *packet,
+ int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+ bool *mem_found)
+{
+ uint64_t iova_addr;
+ size_t src_buf_size;
+ int i;
+ int j;
+ int rc = 0;
+ int32_t mmu_hdl;
+ struct cam_buf_io_cfg *io_cfg = NULL;
+
+ if (mem_found)
+ *mem_found = false;
+
+ io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+ packet->io_configs_offset / 4);
+
+ for (i = 0; i < packet->num_io_configs; i++) {
+ for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+ if (!io_cfg[i].mem_handle[j])
+ break;
+
+ if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+ GET_FD_FROM_HANDLE(pf_buf_info)) {
+ CAM_INFO(CAM_JPEG,
+ "Found PF at port: %d mem %x fd: %x",
+ io_cfg[i].resource_type,
+ io_cfg[i].mem_handle[j],
+ pf_buf_info);
+ if (mem_found)
+ *mem_found = true;
+ }
+
+ CAM_INFO(CAM_JPEG, "port: %d f: %u format: %d dir %d",
+ io_cfg[i].resource_type,
+ io_cfg[i].fence,
+ io_cfg[i].format,
+ io_cfg[i].direction);
+
+ mmu_hdl = cam_mem_is_secure_buf(
+ io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+ iommu_hdl;
+ rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+ mmu_hdl, &iova_addr, &src_buf_size);
+ if (rc < 0) {
+ CAM_ERR(CAM_UTIL, "get src buf address fail");
+ continue;
+ }
+ if (iova_addr >> 32) {
+ CAM_ERR(CAM_JPEG, "Invalid mapped address");
+ rc = -EINVAL;
+ continue;
+ }
+
+ CAM_INFO(CAM_JPEG,
+ "pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
+ j, io_cfg[i].planes[j].width,
+ io_cfg[i].planes[j].height,
+ (int32_t)src_buf_size,
+ (unsigned int)iova_addr,
+ io_cfg[i].offsets[j],
+ io_cfg[i].mem_handle[j]);
+
+ iova_addr += io_cfg[i].offsets[j];
+ }
+ }
+}
+
+static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
+ void *prepare_hw_update_args)
+{
+ int rc, i, j, k;
+ struct cam_hw_prepare_update_args *prepare_args =
+ prepare_hw_update_args;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+ struct cam_packet *packet = NULL;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+ struct cam_kmd_buf_info kmd_buf;
+
+ if (!prepare_args || !hw_mgr) {
+ CAM_ERR(CAM_JPEG, "Invalid args %pK %pK",
+ prepare_args, hw_mgr);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)prepare_args->ctxt_to_hw_map;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ packet = prepare_args->packet;
+ if (!packet) {
+ CAM_ERR(CAM_JPEG, "received packet is NULL");
+ return -EINVAL;
+ }
+
+ if (((packet->header.op_code & 0xff) != CAM_JPEG_OPCODE_ENC_UPDATE) &&
+ ((packet->header.op_code
+ & 0xff) != CAM_JPEG_OPCODE_DMA_UPDATE)) {
+ CAM_ERR(CAM_JPEG, "Invalid Opcode in pkt: %d",
+ packet->header.op_code & 0xff);
+ return -EINVAL;
+ }
+
+ rc = cam_packet_util_validate_packet(packet);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "invalid packet %d", rc);
+ return rc;
+ }
+
+ if ((packet->num_cmd_buf > 5) || !packet->num_patches ||
+ !packet->num_io_configs) {
+ CAM_ERR(CAM_JPEG, "wrong number of cmd/patch info: %u %u",
+ packet->num_cmd_buf,
+ packet->num_patches);
+ return -EINVAL;
+ }
+
+ cmd_desc = (struct cam_cmd_buf_desc *)
+ ((uint32_t *)&packet->payload +
+ (packet->cmd_buf_offset / 4));
+ CAM_DBG(CAM_JPEG, "packet = %pK cmd_desc = %pK size = %lu",
+ (void *)packet, (void *)cmd_desc,
+ sizeof(struct cam_cmd_buf_desc));
+
+ rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl, -1);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "Patch processing failed %d", rc);
+ return rc;
+ }
+
+ io_cfg_ptr = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+ packet->io_configs_offset / 4);
+ CAM_DBG(CAM_JPEG, "packet = %pK io_cfg_ptr = %pK size = %lu",
+ (void *)packet, (void *)io_cfg_ptr,
+ sizeof(struct cam_buf_io_cfg));
+ prepare_args->pf_data->packet = packet;
+
+ prepare_args->num_out_map_entries = 0;
+
+ for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
+ if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
+ prepare_args->in_map_entries[j].resource_handle =
+ io_cfg_ptr[i].resource_type;
+ prepare_args->in_map_entries[j++].sync_id =
+ io_cfg_ptr[i].fence;
+ prepare_args->num_in_map_entries++;
+ } else {
+ prepare_args->in_map_entries[k].resource_handle =
+ io_cfg_ptr[i].resource_type;
+ prepare_args->out_map_entries[k++].sync_id =
+ io_cfg_ptr[i].fence;
+ prepare_args->num_out_map_entries++;
+ }
+ CAM_DBG(CAM_JPEG, "dir[%d]: %u, fence: %u",
+ i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
+ }
+
+
+ j = prepare_args->num_hw_update_entries;
+ rc = cam_packet_util_get_kmd_buffer(packet, &kmd_buf);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "get kmd buf failed %d", rc);
+ return rc;
+ }
+ /* fill kmd buf info into 1st hw update entry */
+ prepare_args->hw_update_entries[j].len =
+ (uint32_t)kmd_buf.used_bytes;
+ prepare_args->hw_update_entries[j].handle =
+ (uint32_t)kmd_buf.handle;
+ prepare_args->hw_update_entries[j].offset =
+ (uint32_t)kmd_buf.offset;
+ j++;
+
+ for (i = 0; i < packet->num_cmd_buf; i++, j++) {
+ prepare_args->hw_update_entries[j].len =
+ (uint32_t)cmd_desc[i].length;
+ prepare_args->hw_update_entries[j].handle =
+ (uint32_t)cmd_desc[i].mem_handle;
+ prepare_args->hw_update_entries[j].offset =
+ (uint32_t)cmd_desc[i].offset;
+ }
+ prepare_args->num_hw_update_entries = j;
+ prepare_args->priv = (void *)(uintptr_t)packet->header.request_id;
+
+ CAM_DBG(CAM_JPEG, "will wait on input sync sync_id %d",
+ prepare_args->in_map_entries[0].sync_id);
+
+ return rc;
+}
+
+static void cam_jpeg_mgr_stop_deinit_dev(struct cam_jpeg_hw_mgr *hw_mgr,
+ struct cam_jpeg_hw_cfg_req *p_cfg_req, uint32_t dev_type)
+{
+ int rc = 0;
+ struct cam_jpeg_set_irq_cb irq_cb;
+
+	/* stop, reset, unregister the IRQ callback, and deinit the device */
+ irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+ irq_cb.data = NULL;
+ irq_cb.b_set_cb = false;
+ if (hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ CAM_JPEG_CMD_SET_IRQ_CB,
+ &irq_cb, sizeof(irq_cb));
+ if (rc)
+ CAM_ERR(CAM_JPEG, "SET_IRQ_CB fail %d", rc);
+ } else {
+ CAM_ERR(CAM_JPEG, "process_cmd null %d", dev_type);
+ }
+
+ if (hw_mgr->devices[dev_type][0]->hw_ops.stop) {
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.stop(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ NULL, 0);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "stop fail %d", rc);
+ } else {
+ CAM_ERR(CAM_JPEG, "op stop null %d", dev_type);
+ }
+
+ if (hw_mgr->devices[dev_type][0]->hw_ops.deinit) {
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.deinit(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ NULL, 0);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "Failed to Deinit %d HW %d",
+ dev_type, rc);
+ } else {
+ CAM_ERR(CAM_JPEG, "op deinit null %d", dev_type);
+ }
+
+ hw_mgr->device_in_use[dev_type][0] = false;
+ hw_mgr->dev_hw_cfg_args[dev_type][0] = NULL;
+}
+
+static int cam_jpeg_mgr_flush(void *hw_mgr_priv,
+ struct cam_jpeg_hw_ctx_data *ctx_data)
+{
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ uint32_t dev_type;
+ struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+ struct cam_jpeg_hw_cfg_req *cfg_req = NULL, *req_temp = NULL;
+
+ CAM_DBG(CAM_JPEG, "E: JPEG flush ctx");
+
+ if (!hw_mgr || !ctx_data) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+
+ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+ p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+ if (hw_mgr->device_in_use[dev_type][0] == true &&
+ p_cfg_req != NULL) {
+ if ((struct cam_jpeg_hw_ctx_data *)
+ p_cfg_req->hw_cfg_args.ctxt_to_hw_map == ctx_data) {
+ cam_jpeg_mgr_stop_deinit_dev(hw_mgr, p_cfg_req,
+ dev_type);
+ list_del_init(&p_cfg_req->list);
+ list_add_tail(&p_cfg_req->list,
+ &hw_mgr->free_req_list);
+ }
+ }
+
+ list_for_each_entry_safe(cfg_req, req_temp,
+ &hw_mgr->hw_config_req_list, list) {
+ if ((struct cam_jpeg_hw_ctx_data *)
+ cfg_req->hw_cfg_args.ctxt_to_hw_map != ctx_data)
+ continue;
+
+ list_del_init(&cfg_req->list);
+ list_add_tail(&cfg_req->list, &hw_mgr->free_req_list);
+ }
+
+ CAM_DBG(CAM_JPEG, "X: JPEG flush ctx");
+
+ return 0;
+}
+
+static int cam_jpeg_mgr_flush_req(void *hw_mgr_priv,
+ struct cam_jpeg_hw_ctx_data *ctx_data,
+ struct cam_hw_flush_args *flush_args)
+{
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_jpeg_hw_cfg_req *cfg_req = NULL;
+ struct cam_jpeg_hw_cfg_req *req_temp = NULL;
+ long request_id = 0;
+ uint32_t dev_type;
+ struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+ bool b_req_found = false;
+
+ CAM_DBG(CAM_JPEG, "E: JPEG flush req");
+
+ if (!hw_mgr || !ctx_data || !flush_args) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+
+ if (flush_args->num_req_pending)
+ return 0;
+
+ request_id = (uintptr_t)flush_args->flush_req_active[0];
+
+ if (!flush_args->num_req_active)
+ return 0;
+
+ if (request_id <= 0) {
+ CAM_ERR(CAM_JPEG, "Invalid red id %ld", request_id);
+ return -EINVAL;
+ }
+
+ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+ p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+ if (hw_mgr->device_in_use[dev_type][0] == true &&
+ p_cfg_req != NULL) {
+ if (((struct cam_jpeg_hw_ctx_data *)
+ p_cfg_req->hw_cfg_args.ctxt_to_hw_map == ctx_data) &&
+ (p_cfg_req->req_id == request_id)) {
+ cam_jpeg_mgr_stop_deinit_dev(hw_mgr, p_cfg_req,
+ dev_type);
+ list_del_init(&p_cfg_req->list);
+ list_add_tail(&p_cfg_req->list,
+ &hw_mgr->free_req_list);
+ b_req_found = true;
+ }
+ }
+
+ list_for_each_entry_safe(cfg_req, req_temp,
+ &hw_mgr->hw_config_req_list, list) {
+ if ((struct cam_jpeg_hw_ctx_data *)
+ cfg_req->hw_cfg_args.ctxt_to_hw_map != ctx_data)
+ continue;
+
+ if (cfg_req->req_id != request_id)
+ continue;
+
+ list_del_init(&cfg_req->list);
+ list_add_tail(&cfg_req->list, &hw_mgr->free_req_list);
+ b_req_found = true;
+ break;
+ }
+
+ if (!b_req_found) {
+ CAM_ERR(CAM_JPEG, "req not found %ld", request_id);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_JPEG, "X: JPEG flush req");
+ return 0;
+}
+
+static int cam_jpeg_mgr_hw_flush(void *hw_mgr_priv, void *flush_hw_args)
+{
+ int rc = 0;
+ struct cam_hw_flush_args *flush_args = flush_hw_args;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+
+ if (!hw_mgr || !flush_args || !flush_args->ctxt_to_hw_map) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)flush_args->ctxt_to_hw_map;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+
+ if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) ||
+ (flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) {
+ CAM_ERR(CAM_JPEG, "Invalid flush type: %d",
+ flush_args->flush_type);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+
+ switch (flush_args->flush_type) {
+ case CAM_FLUSH_TYPE_ALL:
+ rc = cam_jpeg_mgr_flush(hw_mgr_priv, ctx_data);
+ if ((rc))
+ CAM_ERR(CAM_JPEG, "Flush failed %d", rc);
+ break;
+ case CAM_FLUSH_TYPE_REQ:
+ rc = cam_jpeg_mgr_flush_req(hw_mgr_priv, ctx_data, flush_args);
+ break;
+ default:
+ CAM_ERR(CAM_JPEG, "Invalid flush type: %d",
+ flush_args->flush_type);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ return rc;
+}
+
+static int cam_jpeg_mgr_hw_stop(void *hw_mgr_priv, void *stop_hw_args)
+{
+ int rc;
+ struct cam_hw_stop_args *stop_args =
+ (struct cam_hw_stop_args *)stop_hw_args;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+
+ if (!hw_mgr || !stop_args || !stop_args->ctxt_to_hw_map) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)stop_args->ctxt_to_hw_map;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+
+ rc = cam_jpeg_mgr_flush(hw_mgr_priv, ctx_data);
+ if ((rc))
+ CAM_ERR(CAM_JPEG, "flush failed %d", rc);
+
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ return rc;
+}
+
+static int cam_jpeg_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
+{
+ int rc;
+ struct cam_hw_release_args *release_hw = release_hw_args;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+ uint32_t dev_type;
+
+ if (!hw_mgr || !release_hw || !release_hw->ctxt_to_hw_map) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)release_hw->ctxt_to_hw_map;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 0) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ CAM_ERR(CAM_JPEG, "Error Unbalanced deinit");
+ return -EFAULT;
+ }
+
+ hw_mgr->cdm_info[dev_type][0].ref_cnt--;
+ if (!(hw_mgr->cdm_info[dev_type][0].ref_cnt)) {
+ if (cam_cdm_stream_off(
+ hw_mgr->cdm_info[dev_type][0].cdm_handle)) {
+ CAM_ERR(CAM_JPEG, "CDM stream off failed %d",
+ hw_mgr->cdm_info[dev_type][0].cdm_handle);
+ }
+ /* release cdm handle */
+ cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+ }
+
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ rc = cam_jpeg_mgr_release_ctx(hw_mgr, ctx_data);
+ if (rc) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ CAM_ERR(CAM_JPEG, "JPEG release ctx failed");
+ kfree(ctx_data->cdm_cmd);
+ ctx_data->cdm_cmd = NULL;
+
+ return -EINVAL;
+ }
+
+ kfree(ctx_data->cdm_cmd);
+ ctx_data->cdm_cmd = NULL;
+ CAM_DBG(CAM_JPEG, "handle %llu", ctx_data);
+
+ return rc;
+}
+
+static int cam_jpeg_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+ int rc = 0;
+ int32_t ctx_id = 0;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+ struct cam_hw_acquire_args *args = acquire_hw_args;
+ struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
+ struct cam_cdm_acquire_data cdm_acquire;
+ uint32_t dev_type;
+ uint32_t size = 0;
+
+ if ((!hw_mgr_priv) || (!acquire_hw_args)) {
+ CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK", hw_mgr_priv,
+ acquire_hw_args);
+ return -EINVAL;
+ }
+
+ if (args->num_acq > 1) {
+ CAM_ERR(CAM_JPEG,
+ "number of resources are wrong: %u",
+ args->num_acq);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&jpeg_dev_acquire_info,
+ (void __user *)args->acquire_info,
+ sizeof(jpeg_dev_acquire_info))) {
+ CAM_ERR(CAM_JPEG, "copy failed");
+ return -EFAULT;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ ctx_id = cam_jpeg_mgr_get_free_ctx(hw_mgr);
+ if (ctx_id >= CAM_JPEG_CTX_MAX) {
+ CAM_ERR(CAM_JPEG, "No free ctx space in hw_mgr");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EFAULT;
+ }
+
+ ctx_data = &hw_mgr->ctx_data[ctx_id];
+
+ ctx_data->cdm_cmd =
+ kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+ ((CAM_JPEG_HW_ENTRIES_MAX - 1) *
+ sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+ if (!ctx_data->cdm_cmd) {
+ rc = -ENOMEM;
+ goto jpeg_release_ctx;
+ }
+
+ mutex_lock(&ctx_data->ctx_mutex);
+ ctx_data->jpeg_dev_acquire_info = jpeg_dev_acquire_info;
+ mutex_unlock(&ctx_data->ctx_mutex);
+
+ if (ctx_data->jpeg_dev_acquire_info.dev_type >=
+ CAM_JPEG_RES_TYPE_MAX) {
+ rc = -EINVAL;
+ goto acq_cdm_hdl_failed;
+ }
+ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+ if (!hw_mgr->cdm_info[dev_type][0].ref_cnt) {
+
+ if (dev_type == CAM_JPEG_RES_TYPE_ENC) {
+ memcpy(cdm_acquire.identifier,
+ "jpegenc", sizeof("jpegenc"));
+ } else {
+ memcpy(cdm_acquire.identifier,
+ "jpegdma", sizeof("jpegdma"));
+ }
+ cdm_acquire.cell_index = 0;
+ cdm_acquire.handle = 0;
+ cdm_acquire.userdata = ctx_data;
+ if (hw_mgr->cdm_reg_map[dev_type][0]) {
+ cdm_acquire.base_array[0] =
+ hw_mgr->cdm_reg_map[dev_type][0];
+ }
+ cdm_acquire.base_array_cnt = 1;
+ cdm_acquire.id = CAM_CDM_VIRTUAL;
+ cdm_acquire.cam_cdm_callback = NULL;
+
+ rc = cam_cdm_acquire(&cdm_acquire);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "Failed to acquire the CDM HW %d",
+ rc);
+ rc = -EFAULT;
+ goto acq_cdm_hdl_failed;
+ }
+ hw_mgr->cdm_info[dev_type][0].cdm_handle = cdm_acquire.handle;
+ hw_mgr->cdm_info[dev_type][0].cdm_ops = cdm_acquire.ops;
+ hw_mgr->cdm_info[dev_type][0].ref_cnt++;
+ } else {
+ hw_mgr->cdm_info[dev_type][0].ref_cnt++;
+ }
+
+ size =
+ hw_mgr->cdm_info[dev_type][0].cdm_ops->cdm_required_size_changebase();
+
+ if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1)
+ if (cam_cdm_stream_on(
+ hw_mgr->cdm_info[dev_type][0].cdm_handle)) {
+ CAM_ERR(CAM_JPEG, "Can not start cdm (%d)!",
+ hw_mgr->cdm_info[dev_type][0].cdm_handle);
+ rc = -EFAULT;
+ goto start_cdm_hdl_failed;
+ }
+
+ mutex_lock(&ctx_data->ctx_mutex);
+ ctx_data->context_priv = args->context_data;
+
+ args->ctxt_to_hw_map = (void *)&(hw_mgr->ctx_data[ctx_id]);
+
+ mutex_unlock(&ctx_data->ctx_mutex);
+
+ hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+
+
+ if (copy_to_user((void __user *)args->acquire_info,
+ &jpeg_dev_acquire_info,
+ sizeof(jpeg_dev_acquire_info))) {
+ rc = -EFAULT;
+ goto copy_to_user_failed;
+ }
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ CAM_DBG(CAM_JPEG, "success ctx_data= %pK", ctx_data);
+
+ return rc;
+
+copy_to_user_failed:
+ if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1)
+ cam_cdm_stream_off(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+start_cdm_hdl_failed:
+ if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1)
+ cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+ hw_mgr->cdm_info[dev_type][0].ref_cnt--;
+acq_cdm_hdl_failed:
+ kfree(ctx_data->cdm_cmd);
+jpeg_release_ctx:
+ cam_jpeg_mgr_release_ctx(hw_mgr, ctx_data);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ return rc;
+}
+
+static int cam_jpeg_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
+{
+ int rc;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_query_cap_cmd *query_cap = hw_caps_args;
+
+ if (!hw_mgr_priv || !hw_caps_args) {
+ CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK",
+ hw_mgr_priv, hw_caps_args);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+ if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
+ &g_jpeg_hw_mgr.jpeg_caps,
+ sizeof(struct cam_jpeg_query_cap_cmd))) {
+ CAM_ERR(CAM_JPEG, "copy_to_user failed");
+ rc = -EFAULT;
+ goto copy_error;
+ }
+ CAM_DBG(CAM_JPEG, "Success");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ return 0;
+
+copy_error:
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return rc;
+}
+
+static int cam_jpeg_setup_workqs(void)
+{
+ int rc, i;
+
+ rc = cam_req_mgr_workq_create(
+ "jpeg_command_queue",
+ CAM_JPEG_WORKQ_NUM_TASK,
+ &g_jpeg_hw_mgr.work_process_frame,
+ CRM_WORKQ_USAGE_NON_IRQ, 0);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
+ goto work_process_frame_failed;
+ }
+
+ rc = cam_req_mgr_workq_create(
+ "jpeg_message_queue",
+ CAM_JPEG_WORKQ_NUM_TASK,
+ &g_jpeg_hw_mgr.work_process_irq_cb,
+ CRM_WORKQ_USAGE_IRQ, 0);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
+ goto work_process_irq_cb_failed;
+ }
+
+ g_jpeg_hw_mgr.process_frame_work_data =
+ (struct cam_jpeg_process_frame_work_data_t *)
+ kzalloc(sizeof(struct cam_jpeg_process_frame_work_data_t) *
+ CAM_JPEG_WORKQ_NUM_TASK, GFP_KERNEL);
+ if (!g_jpeg_hw_mgr.process_frame_work_data) {
+ rc = -ENOMEM;
+ goto work_process_frame_data_failed;
+ }
+
+ g_jpeg_hw_mgr.process_irq_cb_work_data =
+ (struct cam_jpeg_process_irq_work_data_t *)
+ kzalloc(sizeof(struct cam_jpeg_process_irq_work_data_t) *
+ CAM_JPEG_WORKQ_NUM_TASK, GFP_KERNEL);
+ if (!g_jpeg_hw_mgr.process_irq_cb_work_data) {
+ rc = -ENOMEM;
+ goto work_process_irq_cb_data_failed;
+ }
+
+ for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++)
+ g_jpeg_hw_mgr.work_process_irq_cb->task.pool[i].payload =
+ &g_jpeg_hw_mgr.process_irq_cb_work_data[i];
+
+ for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++)
+ g_jpeg_hw_mgr.work_process_frame->task.pool[i].payload =
+ &g_jpeg_hw_mgr.process_frame_work_data[i];
+
+ INIT_LIST_HEAD(&g_jpeg_hw_mgr.hw_config_req_list);
+ INIT_LIST_HEAD(&g_jpeg_hw_mgr.free_req_list);
+ for (i = 0; i < CAM_JPEG_HW_CFG_Q_MAX; i++) {
+ INIT_LIST_HEAD(&(g_jpeg_hw_mgr.req_list[i].list));
+ list_add_tail(&(g_jpeg_hw_mgr.req_list[i].list),
+ &(g_jpeg_hw_mgr.free_req_list));
+ }
+
+ return rc;
+
+work_process_irq_cb_data_failed:
+ kfree(g_jpeg_hw_mgr.process_frame_work_data);
+work_process_frame_data_failed:
+ cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_irq_cb);
+work_process_irq_cb_failed:
+ cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_frame);
+work_process_frame_failed:
+
+ return rc;
+}
+
+static int cam_jpeg_init_devices(struct device_node *of_node,
+ uint32_t *p_num_enc_dev,
+ uint32_t *p_num_dma_dev)
+{
+ int count, i, rc;
+ uint32_t num_dev;
+ uint32_t num_dma_dev;
+ const char *name = NULL;
+ struct device_node *child_node = NULL;
+ struct platform_device *child_pdev = NULL;
+ struct cam_hw_intf *child_dev_intf = NULL;
+ struct cam_hw_info *enc_hw = NULL;
+ struct cam_hw_info *dma_hw = NULL;
+ struct cam_hw_soc_info *enc_soc_info = NULL;
+ struct cam_hw_soc_info *dma_soc_info = NULL;
+
+ if (!p_num_enc_dev || !p_num_dma_dev) {
+ rc = -EINVAL;
+ goto num_dev_failed;
+ }
+ count = of_property_count_strings(of_node, "compat-hw-name");
+ if (!count) {
+ CAM_ERR(CAM_JPEG,
+ "no compat hw found in dev tree, count = %d",
+ count);
+ rc = -EINVAL;
+ goto num_dev_failed;
+ }
+
+ rc = of_property_read_u32(of_node, "num-jpeg-enc", &num_dev);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "read num enc devices failed %d", rc);
+ goto num_enc_failed;
+ }
+ g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC] = kzalloc(
+ sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
+ if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]) {
+ rc = -ENOMEM;
+ CAM_ERR(CAM_JPEG, "getting number of dma dev nodes failed");
+ goto num_enc_failed;
+ }
+
+ rc = of_property_read_u32(of_node, "num-jpeg-dma", &num_dma_dev);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "get num dma dev nodes failed %d", rc);
+ goto num_dma_failed;
+ }
+
+ g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA] = kzalloc(
+ sizeof(struct cam_hw_intf *) * num_dma_dev, GFP_KERNEL);
+ if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]) {
+ rc = -ENOMEM;
+ goto num_dma_failed;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node, "compat-hw-name",
+ i, &name);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "getting dev object name failed");
+ goto compat_hw_name_failed;
+ }
+
+ child_node = of_find_node_by_name(NULL, name);
+ if (!child_node) {
+ CAM_ERR(CAM_JPEG,
+ "error! Cannot find node in dtsi %s", name);
+ rc = -ENODEV;
+ goto compat_hw_name_failed;
+ }
+
+ child_pdev = of_find_device_by_node(child_node);
+ if (!child_pdev) {
+ CAM_ERR(CAM_JPEG, "failed to find device on bus %s",
+ child_node->name);
+ rc = -ENODEV;
+ of_node_put(child_node);
+ goto compat_hw_name_failed;
+ }
+
+ child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+ child_pdev);
+ if (!child_dev_intf) {
+ CAM_ERR(CAM_JPEG, "no child device");
+ of_node_put(child_node);
+ rc = -ENODEV;
+ goto compat_hw_name_failed;
+ }
+ CAM_DBG(CAM_JPEG, "child_intf %pK type %d id %d",
+ child_dev_intf,
+ child_dev_intf->hw_type,
+ child_dev_intf->hw_idx);
+
+ if ((child_dev_intf->hw_type == CAM_JPEG_DEV_ENC &&
+ child_dev_intf->hw_idx >= num_dev) ||
+ (child_dev_intf->hw_type == CAM_JPEG_DEV_DMA &&
+ child_dev_intf->hw_idx >= num_dma_dev)) {
+ CAM_ERR(CAM_JPEG, "index out of range");
+ rc = -ENODEV;
+ goto compat_hw_name_failed;
+ }
+ g_jpeg_hw_mgr.devices[child_dev_intf->hw_type]
+ [child_dev_intf->hw_idx] = child_dev_intf;
+
+ of_node_put(child_node);
+ }
+
+ enc_hw = (struct cam_hw_info *)
+ g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC][0]->hw_priv;
+ enc_soc_info = &enc_hw->soc_info;
+ g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_ENC][0] =
+ &enc_soc_info->reg_map[0];
+ dma_hw = (struct cam_hw_info *)
+ g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA][0]->hw_priv;
+ dma_soc_info = &dma_hw->soc_info;
+ g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_DMA][0] =
+ &dma_soc_info->reg_map[0];
+
+ *p_num_enc_dev = num_dev;
+ *p_num_dma_dev = num_dma_dev;
+
+ return rc;
+
+compat_hw_name_failed:
+ kfree(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]);
+num_dma_failed:
+ kfree(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]);
+num_enc_failed:
+num_dev_failed:
+
+ return rc;
+}
+
+static int cam_jpeg_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+ int rc = 0;
+ struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+
+ if (!hw_mgr_priv || !cmd_args) {
+ CAM_ERR(CAM_JPEG, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ switch (hw_cmd_args->cmd_type) {
+ case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+ cam_jpeg_mgr_print_io_bufs(
+ hw_cmd_args->u.pf_args.pf_data.packet,
+ hw_mgr->iommu_hdl,
+ hw_mgr->iommu_sec_hdl,
+ hw_cmd_args->u.pf_args.buf_info,
+ hw_cmd_args->u.pf_args.mem_found);
+ break;
+ default:
+ CAM_ERR(CAM_JPEG, "Invalid cmd");
+ }
+
+ return rc;
+}
+
/**
 * cam_jpeg_hw_mgr_init()
 *
 * @brief: One-time initialization of the JPEG hw manager: publishes
 *         the hw_mgr_intf callback table, discovers enc/dma core
 *         devices from DT, attaches the SMMU, fetches CDM iommu
 *         handles, fills the capability info and creates work queues.
 *
 * @of_node:    JPEG hw manager device tree node
 * @hw_mgr_hdl: Out param, treated as struct cam_hw_mgr_intf * and
 *              filled with the manager's entry points
 * @iommu_hdl:  Optional out param for the non-secure iommu handle
 *
 * Returns 0 on success, negative error code otherwise.
 */
int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
	int *iommu_hdl)
{
	int i, rc;
	uint32_t num_dev;
	uint32_t num_dma_dev;
	struct cam_hw_mgr_intf *hw_mgr_intf;
	struct cam_iommu_handle cdm_handles;

	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
	if (!of_node || !hw_mgr_intf) {
		CAM_ERR(CAM_JPEG, "Invalid args of_node %pK hw_mgr %pK",
			of_node, hw_mgr_intf);
		return -EINVAL;
	}

	/* Publish the hw manager entry points to the context layer */
	memset(hw_mgr_hdl, 0x0, sizeof(struct cam_hw_mgr_intf));
	hw_mgr_intf->hw_mgr_priv = &g_jpeg_hw_mgr;
	hw_mgr_intf->hw_get_caps = cam_jpeg_mgr_get_hw_caps;
	hw_mgr_intf->hw_acquire = cam_jpeg_mgr_acquire_hw;
	hw_mgr_intf->hw_release = cam_jpeg_mgr_release_hw;
	hw_mgr_intf->hw_prepare_update = cam_jpeg_mgr_prepare_hw_update;
	hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
	hw_mgr_intf->hw_flush = cam_jpeg_mgr_hw_flush;
	hw_mgr_intf->hw_stop = cam_jpeg_mgr_hw_stop;
	hw_mgr_intf->hw_cmd = cam_jpeg_mgr_cmd;

	mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
	spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock);

	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
		mutex_init(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);

	rc = cam_jpeg_init_devices(of_node, &num_dev, &num_dma_dev);
	if (rc) {
		CAM_ERR(CAM_JPEG, "jpeg init devices %d", rc);
		goto smmu_get_failed;
	}

	rc = cam_smmu_get_handle("jpeg", &g_jpeg_hw_mgr.iommu_hdl);
	if (rc) {
		CAM_ERR(CAM_JPEG, "jpeg get iommu handle failed %d", rc);
		goto smmu_get_failed;
	}

	CAM_DBG(CAM_JPEG, "mmu handle :%d", g_jpeg_hw_mgr.iommu_hdl);
	rc = cam_smmu_ops(g_jpeg_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
	if (rc) {
		CAM_ERR(CAM_JPEG, "jpeg attach failed: %d", rc);
		goto jpeg_attach_failed;
	}

	/*
	 * CDM iommu handles are queried via the "jpegenc" identifier;
	 * on failure they are marked invalid (-1) before bailing out.
	 */
	rc = cam_cdm_get_iommu_handle("jpegenc", &cdm_handles);
	if (rc) {
		CAM_ERR(CAM_JPEG, "acquire cdm iommu handle Fail %d", rc);
		g_jpeg_hw_mgr.cdm_iommu_hdl = -1;
		g_jpeg_hw_mgr.cdm_iommu_hdl_secure = -1;
		goto cdm_iommu_failed;
	}
	g_jpeg_hw_mgr.cdm_iommu_hdl = cdm_handles.non_secure;
	g_jpeg_hw_mgr.cdm_iommu_hdl_secure = cdm_handles.secure;

	/* Capability block later copied to userspace by hw_get_caps */
	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.non_secure =
		g_jpeg_hw_mgr.iommu_hdl;
	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.secure =
		g_jpeg_hw_mgr.iommu_sec_hdl;
	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.non_secure =
		g_jpeg_hw_mgr.cdm_iommu_hdl;
	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.secure =
		g_jpeg_hw_mgr.cdm_iommu_hdl_secure;
	g_jpeg_hw_mgr.jpeg_caps.num_enc = num_dev;
	g_jpeg_hw_mgr.jpeg_caps.num_dma = num_dma_dev;
	/*
	 * NOTE(review): hw version 4.2.0 is hard-coded here rather than
	 * read back from the cores — confirm it matches all target SoCs.
	 */
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.major = 4;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.minor = 2;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.incr = 0;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.reserved = 0;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.major = 4;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.minor = 2;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.incr = 0;
	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.reserved = 0;

	rc = cam_jpeg_setup_workqs();
	if (rc) {
		CAM_ERR(CAM_JPEG, "setup work qs failed %d", rc);
		goto cdm_iommu_failed;
	}

	if (iommu_hdl)
		*iommu_hdl = g_jpeg_hw_mgr.iommu_hdl;

	return rc;

	/* Unwind in reverse order of setup */
cdm_iommu_failed:
	cam_smmu_ops(g_jpeg_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
	cam_smmu_destroy_handle(g_jpeg_hw_mgr.iommu_hdl);
jpeg_attach_failed:
	g_jpeg_hw_mgr.iommu_hdl = 0;
smmu_get_failed:
	mutex_destroy(&g_jpeg_hw_mgr.hw_mgr_mutex);
	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
		mutex_destroy(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);

	return rc;
}
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
new file mode 100644
index 000000000000..82022ec24a77
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -0,0 +1,162 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_MGR_H
+#define CAM_JPEG_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_jpeg_hw_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+
+#define CAM_JPEG_WORKQ_NUM_TASK 30
+#define CAM_JPEG_WORKQ_TASK_CMD_TYPE 1
+#define CAM_JPEG_WORKQ_TASK_MSG_TYPE 2
+#define CAM_JPEG_HW_CFG_Q_MAX 50
+
+/**
+ * struct cam_jpeg_process_frame_work_data_t
+ *
+ * @type: Task type
+ * @data: Pointer to command data
+ * @request_id: Request id
+ */
+struct cam_jpeg_process_frame_work_data_t {
+ uint32_t type;
+ void *data;
+ uintptr_t request_id;
+};
+
+/**
+ * struct cam_jpeg_process_irq_work_data_t
+ *
+ * @type: Task type
+ * @data: Pointer to message data
+ * @result_size: Result size of enc/dma
+ * @irq_status: IRQ status
+ */
+struct cam_jpeg_process_irq_work_data_t {
+ uint32_t type;
+ void *data;
+ int32_t result_size;
+ uint32_t irq_status;
+};
+
+/**
+ * struct cam_jpeg_hw_cdm_info_t
+ *
+ * @ref_cnt: Ref count of how many times device type is acquired
+ * @cdm_handle: Cdm handle
+ * @cdm_ops: Cdm ops struct
+ */
+struct cam_jpeg_hw_cdm_info_t {
+ int ref_cnt;
+ uint32_t cdm_handle;
+ struct cam_cdm_utils_ops *cdm_ops;
+};
+
/**
 * struct cam_jpeg_hw_cfg_req - one queued hw config request
 *
 * @list: List head (lives on hw_config_req_list or free_req_list)
 * @hw_cfg_args: Hw config args
 * @dev_type: Dev type for cfg request
 * @req_id: Request Id
 */
struct cam_jpeg_hw_cfg_req {
	struct list_head list;
	struct cam_hw_config_args hw_cfg_args;
	uint32_t dev_type;
	uintptr_t req_id;
};
+
+/**
+ * struct cam_jpeg_hw_ctx_data
+ *
+ * @context_priv: Context private data, cam_context from
+ * acquire.
+ * @ctx_mutex: Mutex for context
+ * @jpeg_dev_acquire_info: Acquire device info
+ * @ctxt_event_cb: Context callback function
+ * @in_use: Flag for context usage
+ * @wait_complete: Completion info
+ * @cdm_cmd: Cdm cmd submitted for that context.
+ */
+struct cam_jpeg_hw_ctx_data {
+ void *context_priv;
+ struct mutex ctx_mutex;
+ struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
+ cam_hw_event_cb_func ctxt_event_cb;
+ bool in_use;
+ struct completion wait_complete;
+ struct cam_cdm_bl_request *cdm_cmd;
+};
+
+/**
+ * struct cam_jpeg_hw_mgr
+ * @hw_mgr_mutex: Mutex for JPEG hardware manager
+ * @hw_mgr_lock: Spinlock for JPEG hardware manager
+ * @ctx_data: Context data
+ * @jpeg_caps: JPEG capabilities
+ * @iommu_hdl: Non secure IOMMU handle
+ * @iommu_sec_hdl: Secure IOMMU handle
+ * @work_process_frame: Work queue for hw config requests
+ * @work_process_irq_cb: Work queue for processing IRQs.
+ * @process_frame_work_data: Work data pool for hw config
+ * requests
+ * @process_irq_cb_work_data: Work data pool for irq requests
+ * @cdm_iommu_hdl: Iommu handle received from cdm
+ * @cdm_iommu_hdl_secure: Secure iommu handle received from cdm
+ * @devices: Core hw Devices of JPEG hardware manager
+ * @cdm_info: Cdm info for each core device.
+ * @cdm_reg_map: Regmap of each device for cdm.
+ * @device_in_use: Flag device being used for an active request
+ * @dev_hw_cfg_args: Current cfg request per core dev
+ * @hw_config_req_list: Pending hw update requests list
+ * @free_req_list: Free nodes for above list
+ * @req_list: Nodes of hw update list
+ */
+struct cam_jpeg_hw_mgr {
+ struct mutex hw_mgr_mutex;
+ spinlock_t hw_mgr_lock;
+ struct cam_jpeg_hw_ctx_data ctx_data[CAM_JPEG_CTX_MAX];
+ struct cam_jpeg_query_cap_cmd jpeg_caps;
+ int32_t iommu_hdl;
+ int32_t iommu_sec_hdl;
+ struct cam_req_mgr_core_workq *work_process_frame;
+ struct cam_req_mgr_core_workq *work_process_irq_cb;
+ struct cam_jpeg_process_frame_work_data_t *process_frame_work_data;
+ struct cam_jpeg_process_irq_work_data_t *process_irq_cb_work_data;
+ int cdm_iommu_hdl;
+ int cdm_iommu_hdl_secure;
+
+ struct cam_hw_intf **devices[CAM_JPEG_DEV_TYPE_MAX];
+ struct cam_jpeg_hw_cdm_info_t cdm_info[CAM_JPEG_DEV_TYPE_MAX]
+ [CAM_JPEG_NUM_DEV_PER_RES_MAX];
+ struct cam_soc_reg_map *cdm_reg_map[CAM_JPEG_DEV_TYPE_MAX]
+ [CAM_JPEG_NUM_DEV_PER_RES_MAX];
+ uint32_t device_in_use[CAM_JPEG_DEV_TYPE_MAX]
+ [CAM_JPEG_NUM_DEV_PER_RES_MAX];
+ struct cam_jpeg_hw_cfg_req *dev_hw_cfg_args[CAM_JPEG_DEV_TYPE_MAX]
+ [CAM_JPEG_NUM_DEV_PER_RES_MAX];
+
+ struct list_head hw_config_req_list;
+ struct list_head free_req_list;
+ struct cam_jpeg_hw_cfg_req req_list[CAM_JPEG_HW_CFG_Q_MAX];
+};
+
+#endif /* CAM_JPEG_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
new file mode 100644
index 000000000000..48d8f14d243d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_INTF_H
+#define CAM_JPEG_HW_INTF_H
+
+#include "cam_cpas_api.h"
+
+#define CAM_JPEG_CTX_MAX 8
+#define CAM_JPEG_DEV_PER_TYPE_MAX 1
+
+#define CAM_JPEG_CMD_BUF_MAX_SIZE 128
+#define CAM_JPEG_MSG_BUF_MAX_SIZE CAM_JPEG_CMD_BUF_MAX_SIZE
+
+#define JPEG_VOTE 640000000
+
/* Core device types managed by the JPEG hw manager */
enum cam_jpeg_hw_type {
	CAM_JPEG_DEV_ENC,
	CAM_JPEG_DEV_DMA,
};

/**
 * struct cam_jpeg_set_irq_cb - payload for CAM_JPEG_CMD_SET_IRQ_CB
 *
 * @jpeg_hw_mgr_cb: hw mgr callback to invoke on device interrupts
 * @data:           opaque pointer handed back to the callback
 * @b_set_cb:       non-zero installs the callback, zero clears it
 */
struct cam_jpeg_set_irq_cb {
	int32_t (*jpeg_hw_mgr_cb)(uint32_t irq_status,
		int32_t result_size, void *data);
	void *data;
	uint32_t b_set_cb;
};

/* Commands accepted by the per-device process_cmd hooks */
enum cam_jpeg_cmd_type {
	CAM_JPEG_CMD_CDM_CFG,
	CAM_JPEG_CMD_SET_IRQ_CB,
	CAM_JPEG_CMD_MAX,
};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
new file mode 100644
index 000000000000..5705890cd109
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_MGR_INTF_H
+#define CAM_JPEG_HW_MGR_INTF_H
+
+#include <uapi/media/cam_jpeg.h>
+#include <uapi/media/cam_defs.h>
+#include <linux/of.h>
+
+int cam_jpeg_hw_mgr_init(struct device_node *of_node,
+ uint64_t *hw_mgr_hdl, int *iommu_hdl);
+
+#endif /* CAM_JPEG_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile
new file mode 100644
index 000000000000..26884d82070c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_dma_dev.o jpeg_dma_core.o jpeg_dma_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
new file mode 100644
index 000000000000..b6f1d56de67f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
@@ -0,0 +1,195 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "jpeg_dma_core.h"
+#include "jpeg_dma_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
/**
 * cam_jpeg_dma_init_hw()
 *
 * @brief: Power up the JPEG DMA core. Reference counted: only the
 *         first caller places the CPAS vote and enables the soc
 *         resources; later callers just bump the count.
 *
 * @device_priv:  struct cam_hw_info * of the DMA device
 * @init_hw_args: unused
 * @arg_size:     unused
 *
 * Returns 0 on success, negative error code otherwise.
 */
int cam_jpeg_dma_init_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size)
{
	struct cam_hw_info *jpeg_dma_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_jpeg_dma_device_core_info *core_info = NULL;
	struct cam_ahb_vote ahb_vote;
	struct cam_axi_vote axi_vote;
	int rc;

	if (!device_priv) {
		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
		return -EINVAL;
	}

	soc_info = &jpeg_dma_dev->soc_info;
	core_info = (struct cam_jpeg_dma_device_core_info *)
		jpeg_dma_dev->core_info;

	if (!soc_info || !core_info) {
		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
			soc_info, core_info);
		return -EINVAL;
	}

	/* core_mutex guards ref_count and the power-up sequence */
	mutex_lock(&core_info->core_mutex);
	if (++core_info->ref_count > 1) {
		/* Already powered by an earlier caller */
		mutex_unlock(&core_info->core_mutex);
		return 0;
	}

	/* First user: vote bandwidth, then enable clocks/regulators */
	ahb_vote.type = CAM_VOTE_ABSOLUTE;
	ahb_vote.vote.level = CAM_SVS_VOTE;
	axi_vote.compressed_bw = JPEG_VOTE;
	axi_vote.uncompressed_bw = JPEG_VOTE;

	rc = cam_cpas_start(core_info->cpas_handle,
		&ahb_vote, &axi_vote);
	if (rc) {
		CAM_ERR(CAM_JPEG, "cpass start failed: %d", rc);
		goto cpas_failed;
	}

	rc = cam_jpeg_dma_enable_soc_resources(soc_info);
	if (rc) {
		CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc);
		goto soc_failed;
	}

	mutex_unlock(&core_info->core_mutex);

	return 0;

	/* Unwind: drop the CPAS vote and the ref taken above */
soc_failed:
	cam_cpas_stop(core_info->cpas_handle);
cpas_failed:
	--core_info->ref_count;
	mutex_unlock(&core_info->core_mutex);

	return rc;
}
+
/**
 * cam_jpeg_dma_deinit_hw()
 *
 * @brief: Power down the JPEG DMA core. Reference counted: only the
 *         last caller disables soc resources and stops CPAS; failures
 *         during teardown are logged but not propagated.
 *
 * @device_priv:  struct cam_hw_info * of the DMA device
 * @init_hw_args: unused
 * @arg_size:     unused
 *
 * Returns 0 normally, -EFAULT on an unbalanced deinit, -EINVAL on
 * bad arguments.
 */
int cam_jpeg_dma_deinit_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size)
{
	struct cam_hw_info *jpeg_dma_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_jpeg_dma_device_core_info *core_info = NULL;
	int rc;

	if (!device_priv) {
		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
		return -EINVAL;
	}

	soc_info = &jpeg_dma_dev->soc_info;
	core_info = (struct cam_jpeg_dma_device_core_info *)
		jpeg_dma_dev->core_info;
	if (!soc_info || !core_info) {
		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
			soc_info, core_info);
		return -EINVAL;
	}

	mutex_lock(&core_info->core_mutex);
	if (--core_info->ref_count > 0) {
		/* Other users remain; keep the core powered */
		mutex_unlock(&core_info->core_mutex);
		return 0;
	}

	/* Underflow: deinit without a matching init; clamp back to 0 */
	if (core_info->ref_count < 0) {
		CAM_ERR(CAM_JPEG, "ref cnt %d", core_info->ref_count);
		core_info->ref_count = 0;
		mutex_unlock(&core_info->core_mutex);
		return -EFAULT;
	}

	/* Last user: best-effort teardown, errors only logged */
	rc = cam_jpeg_dma_disable_soc_resources(soc_info);
	if (rc)
		CAM_ERR(CAM_JPEG, "soc enable failed %d", rc);

	rc = cam_cpas_stop(core_info->cpas_handle);
	if (rc)
		CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);

	mutex_unlock(&core_info->core_mutex);

	return 0;
}
+
+int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_dma_dev = device_priv;
+ struct cam_jpeg_dma_device_core_info *core_info = NULL;
+ int rc;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_JPEG, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ if (cmd_type >= CAM_JPEG_CMD_MAX) {
+ CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type);
+ return -EINVAL;
+ }
+
+ core_info = (struct cam_jpeg_dma_device_core_info *)
+ jpeg_dma_dev->core_info;
+
+ switch (cmd_type) {
+ case CAM_JPEG_CMD_SET_IRQ_CB:
+ {
+ struct cam_jpeg_set_irq_cb *irq_cb = cmd_args;
+
+ if (!cmd_args) {
+ CAM_ERR(CAM_JPEG, "cmd args NULL");
+ return -EINVAL;
+ }
+ if (irq_cb->b_set_cb) {
+ core_info->irq_cb.jpeg_hw_mgr_cb =
+ irq_cb->jpeg_hw_mgr_cb;
+ core_info->irq_cb.data = irq_cb->data;
+ } else {
+ core_info->irq_cb.jpeg_hw_mgr_cb = NULL;
+ core_info->irq_cb.data = NULL;
+ }
+ rc = 0;
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
/*
 * IRQ handler stub for the JPEG DMA core: the interrupt is acknowledged
 * as handled, but no status is read or forwarded to the hw manager here.
 */
irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data)
{
	return IRQ_HANDLED;
}
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
new file mode 100644
index 000000000000..8f5fd58698d2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_DMA_CORE_H
+#define CAM_JPEG_DMA_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+#include "cam_jpeg_hw_intf.h"
+
/* Static hw description of the DMA core; no fields needed yet */
struct cam_jpeg_dma_device_hw_info {
	uint32_t reserved;
};

/* Lifecycle states of the DMA core */
enum cam_jpeg_dma_core_state {
	CAM_JPEG_DMA_CORE_NOT_READY,
	CAM_JPEG_DMA_CORE_READY,
	CAM_JPEG_DMA_CORE_RESETTING,
	CAM_JPEG_DMA_CORE_STATE_MAX,
};

/**
 * struct cam_jpeg_dma_device_core_info - per-core runtime state
 *
 * @core_state:       current lifecycle state
 * @jpeg_dma_hw_info: static hw description
 * @cpas_handle:      CPAS client handle used for bandwidth voting
 * @irq_cb:           irq callback installed via CAM_JPEG_CMD_SET_IRQ_CB
 * @ref_count:        number of outstanding init_hw users
 * @core_mutex:       guards ref_count and the power up/down sequence
 */
struct cam_jpeg_dma_device_core_info {
	enum cam_jpeg_dma_core_state core_state;
	struct cam_jpeg_dma_device_hw_info *jpeg_dma_hw_info;
	uint32_t cpas_handle;
	struct cam_jpeg_set_irq_cb irq_cb;
	int32_t ref_count;
	struct mutex core_mutex;
};
+
+int cam_jpeg_dma_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_dma_deinit_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data);
+
+#endif /* CAM_JPEG_DMA_CORE_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
new file mode 100644
index 000000000000..fd4fdab19fa7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
@@ -0,0 +1,240 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "jpeg_dma_core.h"
+#include "jpeg_dma_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_hw_info = {
+ .reserved = 0,
+};
+EXPORT_SYMBOL(cam_jpeg_dma_hw_info);
+
+static int cam_jpeg_dma_register_cpas(struct cam_hw_soc_info *soc_info,
+ struct cam_jpeg_dma_device_core_info *core_info,
+ uint32_t hw_idx)
+{
+ struct cam_cpas_register_params cpas_register_params;
+ int rc;
+
+ cpas_register_params.dev = soc_info->dev;
+ memcpy(cpas_register_params.identifier, "jpeg-dma",
+ sizeof("jpeg-dma"));
+ cpas_register_params.cam_cpas_client_cb = NULL;
+ cpas_register_params.cell_index = hw_idx;
+ cpas_register_params.userdata = NULL;
+
+ rc = cam_cpas_register_client(&cpas_register_params);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc);
+ return rc;
+ }
+ core_info->cpas_handle = cpas_register_params.client_handle;
+
+ return rc;
+}
+
+static int cam_jpeg_dma_unregister_cpas(
+ struct cam_jpeg_dma_device_core_info *core_info)
+{
+ int rc;
+
+ rc = cam_cpas_unregister_client(core_info->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc);
+ core_info->cpas_handle = 0;
+
+ return rc;
+}
+
/*
 * Platform-driver .remove: tears down the device in reverse probe order
 * (CPAS client, core info, SoC resources, HW wrapper structs).
 * NOTE(review): if core_info is NULL the CPAS client is never
 * unregistered and rc becomes whatever the SoC release returns —
 * presumably deliberate best-effort cleanup; confirm against probe.
 */
static int cam_jpeg_dma_remove(struct platform_device *pdev)
{
	struct cam_hw_info *jpeg_dma_dev = NULL;
	struct cam_hw_intf *jpeg_dma_dev_intf = NULL;
	struct cam_jpeg_dma_device_core_info *core_info = NULL;
	int rc;

	/* drvdata was set to the cam_hw_intf wrapper in probe */
	jpeg_dma_dev_intf = platform_get_drvdata(pdev);
	if (!jpeg_dma_dev_intf) {
		CAM_ERR(CAM_JPEG, "error No data in pdev");
		return -EINVAL;
	}

	jpeg_dma_dev = jpeg_dma_dev_intf->hw_priv;
	if (!jpeg_dma_dev) {
		CAM_ERR(CAM_JPEG, "error HW data is NULL");
		rc = -ENODEV;
		goto free_jpeg_hw_intf;
	}

	core_info = (struct cam_jpeg_dma_device_core_info *)
		jpeg_dma_dev->core_info;
	if (!core_info) {
		CAM_ERR(CAM_JPEG, "error core data NULL");
		goto deinit_soc; /* rc is set by the release call below */
	}

	rc = cam_jpeg_dma_unregister_cpas(core_info);
	if (rc)
		CAM_ERR(CAM_JPEG, " unreg failed to reg cpas %d", rc);

	mutex_destroy(&core_info->core_mutex);
	kfree(core_info);

deinit_soc:
	/* releases the IRQ/clock/regulator resources requested in init_soc */
	rc = cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
	if (rc)
		CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc);

	mutex_destroy(&jpeg_dma_dev->hw_mutex);
	kfree(jpeg_dma_dev);

free_jpeg_hw_intf:
	kfree(jpeg_dma_dev_intf);
	return rc;
}
+
+static int cam_jpeg_dma_probe(struct platform_device *pdev)
+{
+ struct cam_hw_info *jpeg_dma_dev = NULL;
+ struct cam_hw_intf *jpeg_dma_dev_intf = NULL;
+ const struct of_device_id *match_dev = NULL;
+ struct cam_jpeg_dma_device_core_info *core_info = NULL;
+ struct cam_jpeg_dma_device_hw_info *hw_info = NULL;
+ int rc;
+
+ jpeg_dma_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+ if (!jpeg_dma_dev_intf)
+ return -ENOMEM;
+
+ of_property_read_u32(pdev->dev.of_node,
+ "cell-index", &jpeg_dma_dev_intf->hw_idx);
+
+ jpeg_dma_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!jpeg_dma_dev) {
+ rc = -ENOMEM;
+ goto error_alloc_dev;
+ }
+ jpeg_dma_dev->soc_info.pdev = pdev;
+ jpeg_dma_dev->soc_info.dev = &pdev->dev;
+ jpeg_dma_dev->soc_info.dev_name = pdev->name;
+ jpeg_dma_dev_intf->hw_priv = jpeg_dma_dev;
+ jpeg_dma_dev_intf->hw_ops.init = cam_jpeg_dma_init_hw;
+ jpeg_dma_dev_intf->hw_ops.deinit = cam_jpeg_dma_deinit_hw;
+ jpeg_dma_dev_intf->hw_ops.process_cmd = cam_jpeg_dma_process_cmd;
+ jpeg_dma_dev_intf->hw_type = CAM_JPEG_DEV_DMA;
+
+ platform_set_drvdata(pdev, jpeg_dma_dev_intf);
+ jpeg_dma_dev->core_info =
+ kzalloc(sizeof(struct cam_jpeg_dma_device_core_info),
+ GFP_KERNEL);
+ if (!jpeg_dma_dev->core_info) {
+ rc = -ENOMEM;
+ goto error_alloc_core;
+ }
+ core_info = (struct cam_jpeg_dma_device_core_info *)
+ jpeg_dma_dev->core_info;
+
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev) {
+ CAM_ERR(CAM_JPEG, " No jpeg_dma hardware info");
+ rc = -EINVAL;
+ goto error_match_dev;
+ }
+ hw_info = (struct cam_jpeg_dma_device_hw_info *)match_dev->data;
+ core_info->jpeg_dma_hw_info = hw_info;
+ core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY;
+ mutex_init(&core_info->core_mutex);
+
+ rc = cam_jpeg_dma_init_soc_resources(&jpeg_dma_dev->soc_info,
+ cam_jpeg_dma_irq,
+ jpeg_dma_dev);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "failed to init_soc %d", rc);
+ goto error_init_soc;
+ }
+
+ rc = cam_jpeg_dma_register_cpas(&jpeg_dma_dev->soc_info,
+ core_info, jpeg_dma_dev_intf->hw_idx);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, " failed to reg cpas %d", rc);
+ goto error_reg_cpas;
+ }
+ jpeg_dma_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+ mutex_init(&jpeg_dma_dev->hw_mutex);
+ spin_lock_init(&jpeg_dma_dev->hw_lock);
+ init_completion(&jpeg_dma_dev->hw_complete);
+
+ CAM_DBG(CAM_JPEG, " hwidx %d", jpeg_dma_dev_intf->hw_idx);
+
+ return rc;
+
+error_reg_cpas:
+ rc = cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
+error_init_soc:
+ mutex_destroy(&core_info->core_mutex);
+error_match_dev:
+ kfree(jpeg_dma_dev->core_info);
+error_alloc_core:
+ kfree(jpeg_dma_dev);
+error_alloc_dev:
+ kfree(jpeg_dma_dev_intf);
+ return rc;
+}
+
/* DT match table: binds "qcom,cam_jpeg_dma" to its HW info blob. */
static const struct of_device_id cam_jpeg_dma_dt_match[] = {
	{
		.compatible = "qcom,cam_jpeg_dma",
		.data = &cam_jpeg_dma_hw_info,
	},
	{}
};
MODULE_DEVICE_TABLE(of, cam_jpeg_dma_dt_match);

/* suppress_bind_attrs: disallow manual bind/unbind through sysfs */
static struct platform_driver cam_jpeg_dma_driver = {
	.probe = cam_jpeg_dma_probe,
	.remove = cam_jpeg_dma_remove,
	.driver = {
		.name = "cam-jpeg-dma",
		.owner = THIS_MODULE,
		.of_match_table = cam_jpeg_dma_dt_match,
		.suppress_bind_attrs = true,
	},
};
+
+static int __init cam_jpeg_dma_init_module(void)
+{
+ return platform_driver_register(&cam_jpeg_dma_driver);
+}
+
+static void __exit cam_jpeg_dma_exit_module(void)
+{
+ platform_driver_unregister(&cam_jpeg_dma_driver);
+}
+
+module_init(cam_jpeg_dma_init_module);
+module_exit(cam_jpeg_dma_exit_module);
+MODULE_DESCRIPTION("CAM JPEG_DMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
new file mode 100644
index 000000000000..e64f9ddd10a2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "jpeg_dma_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t jpeg_dma_irq_handler, void *irq_data)
+{
+ int rc;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc)
+ return rc;
+
+ rc = cam_soc_util_request_platform_resource(soc_info,
+ jpeg_dma_irq_handler,
+ irq_data);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "init soc failed %d", rc);
+
+ return rc;
+}
+
+int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc;
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_SVS_VOTE, true);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "enable platform failed %d", rc);
+
+ return rc;
+}
+
+int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h
new file mode 100644
index 000000000000..007c3277fd4a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
#ifndef _CAM_JPEG_DMA_SOC_H_
#define _CAM_JPEG_DMA_SOC_H_

#include "cam_soc_util.h"

/* Parse DT properties and request platform resources, routing the IRQ
 * line to jpeg_dma_irq_handler with irq_data as its cookie. */
int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t jpeg_dma_irq_handler, void *irq_data);

/* Power up clocks/regulators at the default SVS vote. */
int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info);

/* Power down the clocks/regulators enabled above. */
int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info);

#endif /* _CAM_JPEG_DMA_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile
new file mode 100644
index 000000000000..d7d160f2a79a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile
@@ -0,0 +1,10 @@
# Header search paths for camera utils/req-mgr/core/CPAS and the
# JPEG HW layers this sub-module builds against.
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/
ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw

# JPEG encoder HW: platform device, core ops, and SoC resource glue.
obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_enc_dev.o jpeg_enc_core.o jpeg_enc_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h
new file mode 100644
index 000000000000..c6017afd3c37
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
#ifndef CAM_JPEG_ENC_HW_INFO_TITAN170_H
#define CAM_JPEG_ENC_HW_INFO_TITAN170_H

/* IRQ status register bit layout for the v4.2.0 (Titan170) encoder. */
#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001
#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000

/* NOTE(review): mask is bit 28 but the SHIFT value says 10; the shift
 * is unused in the visible code — confirm against the HW spec before
 * relying on it. */
#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000
#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a

#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK 0x8000000
#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_SHIFT 0x0000001b

#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800
#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b

/* Decoder/bit-stream fault bits (bits 19..26) and access violation. */
#define CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF (0x1<<19)
#define CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR (0x1<<20)
#define CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR (0x1<<21)
#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22)
#define CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW (0x1<<23)
#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM (0x1<<24)
#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ (0x1<<25)
#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM (0x1<<26)
#define CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK (0x1<<29)

/* Composite masks consumed by cam_jpeg_enc_core.c's IRQ dispatch. */
#define CAM_JPEG_HW_MASK_COMP_FRAMEDONE \
	CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK
#define CAM_JPEG_HW_MASK_COMP_RESET_ACK \
	CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK
#define CAM_JPEG_HW_MASK_COMP_ERR \
	(CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \
	CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \
	CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \
	CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \
	CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \
	CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \
	CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \
	CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \
	CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK)

/* Register offsets, command values, and IRQ cause masks for v4.2.0;
 * referenced by jpeg_enc_dev.c through the of_device_id .data slot. */
static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_hw_info = {
	.reg_offset = {
		.hw_version = 0x0,
		.int_clr = 0x1c,
		.int_status = 0x20,
		.int_mask = 0x18,
		.hw_cmd = 0x10,
		.reset_cmd = 0x8,
		.encode_size = 0x180,
	},
	.reg_val = {
		.int_clr_clearall = 0xFFFFFFFF,
		.int_mask_disable_all = 0x00000000,
		.int_mask_enable_all = 0xFFFFFFFF,
		.hw_cmd_start = 0x00000001,
		.reset_cmd = 0x00032093,
		.hw_cmd_stop = 0x00000002,
	},
	.int_status = {
		.framedone = CAM_JPEG_HW_MASK_COMP_FRAMEDONE,
		.resetdone = CAM_JPEG_HW_MASK_COMP_RESET_ACK,
		.iserror = CAM_JPEG_HW_MASK_COMP_ERR,
		.stopdone = CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK,
	}
};

#endif /* CAM_JPEG_ENC_HW_INFO_TITAN170_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
new file mode 100644
index 000000000000..7fcc1ada1a36
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
@@ -0,0 +1,426 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "jpeg_enc_core.h"
+#include "jpeg_enc_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
/* Test a raw IRQ status word against the per-version cause masks held
 * in cam_jpeg_enc_device_hw_info (hi = hw_info pointer). */
#define CAM_JPEG_HW_IRQ_IS_FRAME_DONE(jpeg_irq_status, hi) \
	((jpeg_irq_status) & (hi)->int_status.framedone)
#define CAM_JPEG_HW_IRQ_IS_RESET_ACK(jpeg_irq_status, hi) \
	((jpeg_irq_status) & (hi)->int_status.resetdone)
#define CAM_JPEG_HW_IRQ_IS_ERR(jpeg_irq_status, hi) \
	((jpeg_irq_status) & (hi)->int_status.iserror)
#define CAM_JPEG_HW_IRQ_IS_STOP_DONE(jpeg_irq_status, hi) \
	((jpeg_irq_status) & (hi)->int_status.stopdone)

/* Timeout shared by the reset and stop completion waits. */
#define CAM_JPEG_ENC_RESET_TIMEOUT msecs_to_jiffies(500)
+
/*
 * HW-mgr "init" op: on the first reference, votes AHB/AXI bandwidth via
 * CPAS and powers up clocks/regulators. Nested calls only bump the
 * refcount. Serialized by core_mutex.
 */
int cam_jpeg_enc_init_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size)
{
	struct cam_hw_info *jpeg_enc_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_jpeg_enc_device_core_info *core_info = NULL;
	struct cam_ahb_vote ahb_vote;
	struct cam_axi_vote axi_vote;
	int rc;

	if (!device_priv) {
		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
		return -EINVAL;
	}

	soc_info = &jpeg_enc_dev->soc_info;
	core_info = (struct cam_jpeg_enc_device_core_info *)
		jpeg_enc_dev->core_info;

	/* NOTE(review): soc_info is the address of an embedded member and
	 * can never be NULL; in practice this only guards core_info. */
	if (!soc_info || !core_info) {
		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
			soc_info, core_info);
		return -EINVAL;
	}

	mutex_lock(&core_info->core_mutex);
	/* already powered by an earlier caller: just hold a reference */
	if (++core_info->ref_count > 1) {
		mutex_unlock(&core_info->core_mutex);
		return 0;
	}

	ahb_vote.type = CAM_VOTE_ABSOLUTE;
	ahb_vote.vote.level = CAM_SVS_VOTE;
	axi_vote.compressed_bw = JPEG_VOTE;
	axi_vote.uncompressed_bw = JPEG_VOTE;

	rc = cam_cpas_start(core_info->cpas_handle,
		&ahb_vote, &axi_vote);
	if (rc) {
		CAM_ERR(CAM_JPEG, "cpass start failed: %d", rc);
		goto cpas_failed;
	}

	rc = cam_jpeg_enc_enable_soc_resources(soc_info);
	if (rc) {
		CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc);
		goto soc_failed;
	}

	mutex_unlock(&core_info->core_mutex);

	return 0;

soc_failed:
	cam_cpas_stop(core_info->cpas_handle);
cpas_failed:
	/* undo the reference taken above so a retry starts clean */
	--core_info->ref_count;
	mutex_unlock(&core_info->core_mutex);

	return rc;
}
+
/*
 * HW-mgr "deinit" op: drops one reference; the last reference powers
 * down SoC resources and stops the CPAS vote. Failures in the teardown
 * calls are logged but deliberately not propagated (returns 0).
 */
int cam_jpeg_enc_deinit_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size)
{
	struct cam_hw_info *jpeg_enc_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_jpeg_enc_device_core_info *core_info = NULL;
	int rc;

	if (!device_priv) {
		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
		return -EINVAL;
	}

	soc_info = &jpeg_enc_dev->soc_info;
	core_info = (struct cam_jpeg_enc_device_core_info *)
		jpeg_enc_dev->core_info;
	if (!soc_info || !core_info) {
		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
			soc_info, core_info);
		return -EINVAL;
	}

	mutex_lock(&core_info->core_mutex);
	/* other users still hold references: nothing to power down yet */
	if (--core_info->ref_count > 0) {
		mutex_unlock(&core_info->core_mutex);
		return 0;
	}

	/* underflow guard: more deinits than inits — clamp and report */
	if (core_info->ref_count < 0) {
		CAM_ERR(CAM_JPEG, "ref cnt %d", core_info->ref_count);
		core_info->ref_count = 0;
		mutex_unlock(&core_info->core_mutex);
		return -EFAULT;
	}

	rc = cam_jpeg_enc_disable_soc_resources(soc_info);
	if (rc)
		CAM_ERR(CAM_JPEG, "soc disable failed %d", rc);

	rc = cam_cpas_stop(core_info->cpas_handle);
	if (rc)
		CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);

	mutex_unlock(&core_info->core_mutex);

	return 0;
}
+
/*
 * Hard-IRQ handler: reads and acks the IRQ status, then dispatches the
 * frame-done / reset-ack / stop-done / error causes to the HW manager
 * callback. Core-state transitions are guarded by hw_lock, which is
 * also taken from process context in reset/stop.
 */
irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data)
{
	struct cam_hw_info *jpeg_enc_dev = data;
	struct cam_jpeg_enc_device_core_info *core_info = NULL;
	uint32_t irq_status = 0;
	uint32_t encoded_size = 0;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
	void __iomem *mem_base;

	if (!jpeg_enc_dev) {
		CAM_ERR(CAM_JPEG, "Invalid args");
		return IRQ_HANDLED;
	}
	soc_info = &jpeg_enc_dev->soc_info;
	core_info = (struct cam_jpeg_enc_device_core_info *)
		jpeg_enc_dev->core_info;
	hw_info = core_info->jpeg_enc_hw_info;
	mem_base = soc_info->reg_map[0].mem_base;

	irq_status = cam_io_r_mb(mem_base +
		core_info->jpeg_enc_hw_info->reg_offset.int_status);

	/* write-1-to-clear ack of everything we just read */
	cam_io_w_mb(irq_status,
		soc_info->reg_map[0].mem_base +
		core_info->jpeg_enc_hw_info->reg_offset.int_clr);

	CAM_DBG(CAM_JPEG, "irq_num %d irq_status = %x , core_state %d",
		irq_num, irq_status, core_info->core_state);

	/* frame done: fetch output size and hand it to the HW manager */
	if (CAM_JPEG_HW_IRQ_IS_FRAME_DONE(irq_status, hw_info)) {
		spin_lock(&jpeg_enc_dev->hw_lock);
		if (core_info->core_state == CAM_JPEG_ENC_CORE_READY) {
			encoded_size = cam_io_r_mb(mem_base +
				core_info->jpeg_enc_hw_info->reg_offset.encode_size);
			if (core_info->irq_cb.jpeg_hw_mgr_cb) {
				core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
					encoded_size,
					core_info->irq_cb.data);
			} else {
				CAM_ERR(CAM_JPEG, "unexpected done, no cb");
			}
		} else {
			CAM_ERR(CAM_JPEG, "unexpected done irq");
		}
		/* a reset is required before the next frame can start */
		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
		spin_unlock(&jpeg_enc_dev->hw_lock);
	}
	/* reset acknowledged: wake the waiter in cam_jpeg_enc_reset_hw */
	if (CAM_JPEG_HW_IRQ_IS_RESET_ACK(irq_status, hw_info)) {
		spin_lock(&jpeg_enc_dev->hw_lock);
		if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
			core_info->core_state = CAM_JPEG_ENC_CORE_READY;
			complete(&jpeg_enc_dev->hw_complete);
		} else {
			CAM_ERR(CAM_JPEG, "unexpected reset irq");
		}
		spin_unlock(&jpeg_enc_dev->hw_lock);
	}
	/* stop/abort done: wake cam_jpeg_enc_stop_hw and notify mgr */
	if (CAM_JPEG_HW_IRQ_IS_STOP_DONE(irq_status, hw_info)) {
		spin_lock(&jpeg_enc_dev->hw_lock);
		if (core_info->core_state == CAM_JPEG_ENC_CORE_ABORTING) {
			core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
			complete(&jpeg_enc_dev->hw_complete);
			if (core_info->irq_cb.jpeg_hw_mgr_cb) {
				core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
					-1,
					core_info->irq_cb.data);
			}
		} else {
			CAM_ERR(CAM_JPEG, "unexpected abort irq");
		}
		spin_unlock(&jpeg_enc_dev->hw_lock);
	}
	/* Unexpected/unintended HW interrupt */
	if (CAM_JPEG_HW_IRQ_IS_ERR(irq_status, hw_info)) {
		spin_lock(&jpeg_enc_dev->hw_lock);
		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
		CAM_ERR_RATE_LIMIT(CAM_JPEG,
			"error irq_num %d irq_status = %x , core_state %d",
			irq_num, irq_status, core_info->core_state);

		if (core_info->irq_cb.jpeg_hw_mgr_cb) {
			core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
				-1,
				core_info->irq_cb.data);
		}
		spin_unlock(&jpeg_enc_dev->hw_lock);
	}

	return IRQ_HANDLED;
}
+
+int cam_jpeg_enc_reset_hw(void *data,
+ void *start_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_enc_dev = data;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+ void __iomem *mem_base;
+ unsigned long rem_jiffies;
+
+ if (!jpeg_enc_dev) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+ /* maskdisable.clrirq.maskenable.resetcmd */
+ soc_info = &jpeg_enc_dev->soc_info;
+ core_info = (struct cam_jpeg_enc_device_core_info *)
+ jpeg_enc_dev->core_info;
+ hw_info = core_info->jpeg_enc_hw_info;
+ mem_base = soc_info->reg_map[0].mem_base;
+
+ mutex_lock(&core_info->core_mutex);
+ spin_lock(&jpeg_enc_dev->hw_lock);
+ if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
+ CAM_ERR(CAM_JPEG, "alrady resetting");
+ spin_unlock(&jpeg_enc_dev->hw_lock);
+ mutex_unlock(&core_info->core_mutex);
+ return 0;
+ }
+
+ reinit_completion(&jpeg_enc_dev->hw_complete);
+ core_info->core_state = CAM_JPEG_ENC_CORE_RESETTING;
+ spin_unlock(&jpeg_enc_dev->hw_lock);
+
+ cam_io_w_mb(hw_info->reg_val.int_mask_disable_all,
+ mem_base + hw_info->reg_offset.int_mask);
+ cam_io_w_mb(hw_info->reg_val.int_clr_clearall,
+ mem_base + hw_info->reg_offset.int_clr);
+ cam_io_w_mb(hw_info->reg_val.int_mask_enable_all,
+ mem_base + hw_info->reg_offset.int_mask);
+ cam_io_w_mb(hw_info->reg_val.reset_cmd,
+ mem_base + hw_info->reg_offset.reset_cmd);
+
+ rem_jiffies = wait_for_completion_timeout(&jpeg_enc_dev->hw_complete,
+ CAM_JPEG_ENC_RESET_TIMEOUT);
+ if (!rem_jiffies) {
+ CAM_ERR(CAM_JPEG, "error Reset Timeout");
+ core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+ }
+
+ mutex_unlock(&core_info->core_mutex);
+ return 0;
+}
+
+int cam_jpeg_enc_start_hw(void *data,
+ void *start_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_enc_dev = data;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+ void __iomem *mem_base;
+
+ if (!jpeg_enc_dev) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+
+ soc_info = &jpeg_enc_dev->soc_info;
+ core_info = (struct cam_jpeg_enc_device_core_info *)
+ jpeg_enc_dev->core_info;
+ hw_info = core_info->jpeg_enc_hw_info;
+ mem_base = soc_info->reg_map[0].mem_base;
+
+ if (core_info->core_state != CAM_JPEG_ENC_CORE_READY) {
+ CAM_ERR(CAM_JPEG, "Error not ready");
+ return -EINVAL;
+ }
+
+ cam_io_w_mb(hw_info->reg_val.hw_cmd_start,
+ mem_base + hw_info->reg_offset.hw_cmd);
+
+ return 0;
+}
+
+int cam_jpeg_enc_stop_hw(void *data,
+ void *stop_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_enc_dev = data;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+ void __iomem *mem_base;
+ unsigned long rem_jiffies;
+
+ if (!jpeg_enc_dev) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+ soc_info = &jpeg_enc_dev->soc_info;
+ core_info = (struct cam_jpeg_enc_device_core_info *)
+ jpeg_enc_dev->core_info;
+ hw_info = core_info->jpeg_enc_hw_info;
+ mem_base = soc_info->reg_map[0].mem_base;
+
+ mutex_lock(&core_info->core_mutex);
+ spin_lock(&jpeg_enc_dev->hw_lock);
+ if (core_info->core_state == CAM_JPEG_ENC_CORE_ABORTING) {
+ CAM_ERR(CAM_JPEG, "alrady stopping");
+ spin_unlock(&jpeg_enc_dev->hw_lock);
+ mutex_unlock(&core_info->core_mutex);
+ return 0;
+ }
+
+ reinit_completion(&jpeg_enc_dev->hw_complete);
+ core_info->core_state = CAM_JPEG_ENC_CORE_ABORTING;
+ spin_unlock(&jpeg_enc_dev->hw_lock);
+
+ cam_io_w_mb(hw_info->reg_val.hw_cmd_stop,
+ mem_base + hw_info->reg_offset.hw_cmd);
+
+ rem_jiffies = wait_for_completion_timeout(&jpeg_enc_dev->hw_complete,
+ CAM_JPEG_ENC_RESET_TIMEOUT);
+ if (!rem_jiffies) {
+ CAM_ERR(CAM_JPEG, "error Reset Timeout");
+ core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+ }
+
+ mutex_unlock(&core_info->core_mutex);
+ return 0;
+}
+
+int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_enc_dev = device_priv;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ int rc;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_JPEG, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ if (cmd_type >= CAM_JPEG_CMD_MAX) {
+ CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type);
+ return -EINVAL;
+ }
+
+ core_info = (struct cam_jpeg_enc_device_core_info *)
+ jpeg_enc_dev->core_info;
+
+ switch (cmd_type) {
+ case CAM_JPEG_CMD_SET_IRQ_CB:
+ {
+ struct cam_jpeg_set_irq_cb *irq_cb = cmd_args;
+
+ if (!cmd_args) {
+ CAM_ERR(CAM_JPEG, "cmd args NULL");
+ return -EINVAL;
+ }
+ if (irq_cb->b_set_cb) {
+ core_info->irq_cb.jpeg_hw_mgr_cb =
+ irq_cb->jpeg_hw_mgr_cb;
+ core_info->irq_cb.data = irq_cb->data;
+ } else {
+ core_info->irq_cb.jpeg_hw_mgr_cb = NULL;
+ core_info->irq_cb.data = NULL;
+ }
+ rc = 0;
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ CAM_ERR(CAM_JPEG, "error cmdtype %d rc = %d", cmd_type, rc);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
new file mode 100644
index 000000000000..5c571ab893f8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
@@ -0,0 +1,87 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
#ifndef CAM_JPEG_ENC_CORE_H
#define CAM_JPEG_ENC_CORE_H

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-buf.h>

#include "cam_jpeg_hw_intf.h"

/* Byte offsets of the encoder registers, relative to reg_map[0]. */
struct cam_jpeg_enc_reg_offsets {
	uint32_t hw_version;
	uint32_t int_status;
	uint32_t int_clr;
	uint32_t int_mask;
	uint32_t hw_cmd;
	uint32_t reset_cmd;
	uint32_t encode_size;
};

/* Command/mask values written to the registers above. */
struct cam_jpeg_enc_regval {
	uint32_t int_clr_clearall;
	uint32_t int_mask_disable_all;
	uint32_t int_mask_enable_all;
	uint32_t hw_cmd_start;
	uint32_t reset_cmd;
	uint32_t hw_cmd_stop;
};

/* IRQ-status cause masks used by the ISR's dispatch macros. */
struct cam_jpeg_enc_int_status {
	uint32_t framedone;
	uint32_t resetdone;
	uint32_t iserror;
	uint32_t stopdone;
};

/* Per-HW-version description, supplied via the of_device_id .data. */
struct cam_jpeg_enc_device_hw_info {
	struct cam_jpeg_enc_reg_offsets reg_offset;
	struct cam_jpeg_enc_regval reg_val;
	struct cam_jpeg_enc_int_status int_status;
};

/* Lifecycle state: NOT_READY until a reset is acked (READY); ABORTING
 * while a stop command is in flight. */
enum cam_jpeg_enc_core_state {
	CAM_JPEG_ENC_CORE_NOT_READY,
	CAM_JPEG_ENC_CORE_READY,
	CAM_JPEG_ENC_CORE_RESETTING,
	CAM_JPEG_ENC_CORE_ABORTING,
	CAM_JPEG_ENC_CORE_STATE_MAX,
};

/* Driver-private per-device context hung off cam_hw_info::core_info. */
struct cam_jpeg_enc_device_core_info {
	enum cam_jpeg_enc_core_state core_state; /* guarded by hw_lock */
	struct cam_jpeg_enc_device_hw_info *jpeg_enc_hw_info; /* DT match */
	uint32_t cpas_handle; /* CPAS client handle; 0 when unregistered */
	struct cam_jpeg_set_irq_cb irq_cb; /* HW-mgr IRQ callback + cookie */
	int32_t ref_count; /* init/deinit nesting count */
	struct mutex core_mutex; /* serializes init/deinit/reset/stop */
};

/* cam_hw_intf ops implemented by jpeg_enc_core.c, plus the ISR. */
int cam_jpeg_enc_init_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size);
int cam_jpeg_enc_deinit_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size);
int cam_jpeg_enc_start_hw(void *device_priv,
	void *start_hw_args, uint32_t arg_size);
int cam_jpeg_enc_stop_hw(void *device_priv,
	void *stop_hw_args, uint32_t arg_size);
int cam_jpeg_enc_reset_hw(void *device_priv,
	void *reset_hw_args, uint32_t arg_size);
int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
	void *cmd_args, uint32_t arg_size);
irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data);

#endif /* CAM_JPEG_ENC_CORE_H */
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
new file mode 100644
index 000000000000..d4daa6dde308
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "jpeg_enc_core.h"
+#include "jpeg_enc_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_jpeg_enc_hw_info_ver_4_2_0.h"
+
+/*
+ * cam_jpeg_enc_register_cpas() - register this encoder as a CPAS client.
+ * @soc_info:  SOC info supplying the struct device for registration.
+ * @core_info: per-core state; the client handle is stored in
+ *             @core_info->cpas_handle on success.
+ * @hw_idx:    hardware cell index reported to CPAS.
+ *
+ * Return: 0 on success, or the error from cam_cpas_register_client().
+ */
+static int cam_jpeg_enc_register_cpas(struct cam_hw_soc_info *soc_info,
+ struct cam_jpeg_enc_device_core_info *core_info,
+ uint32_t hw_idx)
+{
+ struct cam_cpas_register_params cpas_register_params;
+ int rc;
+
+ cpas_register_params.dev = soc_info->dev;
+ /* Copies the NUL terminator too, via sizeof on the string literal. */
+ memcpy(cpas_register_params.identifier, "jpeg-enc",
+ sizeof("jpeg-enc"));
+ cpas_register_params.cam_cpas_client_cb = NULL;
+ cpas_register_params.cell_index = hw_idx;
+ cpas_register_params.userdata = NULL;
+
+ rc = cam_cpas_register_client(&cpas_register_params);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc);
+ return rc;
+ }
+ core_info->cpas_handle = cpas_register_params.client_handle;
+
+ return rc;
+}
+
+/*
+ * cam_jpeg_enc_unregister_cpas() - drop the CPAS client registration.
+ * Clears @core_info->cpas_handle unconditionally, even if the
+ * unregister call itself failed (failure is only logged).
+ */
+static int cam_jpeg_enc_unregister_cpas(
+ struct cam_jpeg_enc_device_core_info *core_info)
+{
+ int rc;
+
+ rc = cam_cpas_unregister_client(core_info->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc);
+ core_info->cpas_handle = 0;
+
+ return rc;
+}
+
+/*
+ * cam_jpeg_enc_remove() - platform driver remove; tears down in reverse
+ * probe order: CPAS client, core info, SOC resources, then the hw_info
+ * and hw_intf allocations.
+ */
+static int cam_jpeg_enc_remove(struct platform_device *pdev)
+{
+ struct cam_hw_info *jpeg_enc_dev = NULL;
+ struct cam_hw_intf *jpeg_enc_dev_intf = NULL;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ int rc;
+
+ jpeg_enc_dev_intf = platform_get_drvdata(pdev);
+ if (!jpeg_enc_dev_intf) {
+ CAM_ERR(CAM_JPEG, "error No data in pdev");
+ return -EINVAL;
+ }
+
+ jpeg_enc_dev = jpeg_enc_dev_intf->hw_priv;
+ if (!jpeg_enc_dev) {
+ CAM_ERR(CAM_JPEG, "error HW data is NULL");
+ rc = -ENODEV;
+ goto free_jpeg_hw_intf;
+ }
+
+ core_info = (struct cam_jpeg_enc_device_core_info *)
+ jpeg_enc_dev->core_info;
+ if (!core_info) {
+ /*
+ * NOTE(review): rc is not set on this path; the value returned
+ * is whatever cam_soc_util_release_platform_resource() yields
+ * below, so a missing core_info may report success — confirm
+ * this is intended.
+ */
+ CAM_ERR(CAM_JPEG, "error core data NULL");
+ goto deinit_soc;
+ }
+
+ rc = cam_jpeg_enc_unregister_cpas(core_info);
+ if (rc)
+ CAM_ERR(CAM_JPEG, " unreg failed to reg cpas %d", rc);
+
+ mutex_destroy(&core_info->core_mutex);
+ kfree(core_info);
+
+deinit_soc:
+ rc = cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc);
+
+ mutex_destroy(&jpeg_enc_dev->hw_mutex);
+ kfree(jpeg_enc_dev);
+
+free_jpeg_hw_intf:
+ kfree(jpeg_enc_dev_intf);
+ return rc;
+}
+
+/*
+ * cam_jpeg_enc_probe() - platform driver probe for one JPEG encoder.
+ *
+ * Allocates the cam_hw_intf / cam_hw_info pair, wires the hw_ops to the
+ * core-layer handlers, looks up per-version HW info from the OF match
+ * table, requests SOC resources (incl. IRQ) and registers with CPAS.
+ * Error paths unwind in strict reverse order of acquisition.
+ */
+static int cam_jpeg_enc_probe(struct platform_device *pdev)
+{
+ struct cam_hw_info *jpeg_enc_dev = NULL;
+ struct cam_hw_intf *jpeg_enc_dev_intf = NULL;
+ const struct of_device_id *match_dev = NULL;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+ int rc;
+
+ jpeg_enc_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+ if (!jpeg_enc_dev_intf)
+ return -ENOMEM;
+
+ /*
+ * NOTE(review): return value ignored; hw_idx stays 0 (kzalloc) when
+ * the "cell-index" DT property is absent — confirm that is acceptable.
+ */
+ of_property_read_u32(pdev->dev.of_node,
+ "cell-index", &jpeg_enc_dev_intf->hw_idx);
+
+ jpeg_enc_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!jpeg_enc_dev) {
+ rc = -ENOMEM;
+ goto error_alloc_dev;
+ }
+ jpeg_enc_dev->soc_info.pdev = pdev;
+ jpeg_enc_dev->soc_info.dev = &pdev->dev;
+ jpeg_enc_dev->soc_info.dev_name = pdev->name;
+ jpeg_enc_dev_intf->hw_priv = jpeg_enc_dev;
+ jpeg_enc_dev_intf->hw_ops.init = cam_jpeg_enc_init_hw;
+ jpeg_enc_dev_intf->hw_ops.deinit = cam_jpeg_enc_deinit_hw;
+ jpeg_enc_dev_intf->hw_ops.start = cam_jpeg_enc_start_hw;
+ jpeg_enc_dev_intf->hw_ops.stop = cam_jpeg_enc_stop_hw;
+ jpeg_enc_dev_intf->hw_ops.reset = cam_jpeg_enc_reset_hw;
+ jpeg_enc_dev_intf->hw_ops.process_cmd = cam_jpeg_enc_process_cmd;
+ jpeg_enc_dev_intf->hw_type = CAM_JPEG_DEV_ENC;
+
+ platform_set_drvdata(pdev, jpeg_enc_dev_intf);
+ jpeg_enc_dev->core_info =
+ kzalloc(sizeof(struct cam_jpeg_enc_device_core_info),
+ GFP_KERNEL);
+ if (!jpeg_enc_dev->core_info) {
+ rc = -ENOMEM;
+ goto error_alloc_core;
+ }
+ core_info = (struct cam_jpeg_enc_device_core_info *)
+ jpeg_enc_dev->core_info;
+
+ /* Per-version HW info comes from the matched of_device_id .data. */
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev) {
+ CAM_ERR(CAM_JPEG, " No jpeg_enc hardware info");
+ rc = -EINVAL;
+ goto error_match_dev;
+ }
+ hw_info = (struct cam_jpeg_enc_device_hw_info *)match_dev->data;
+ core_info->jpeg_enc_hw_info = hw_info;
+ core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+ mutex_init(&core_info->core_mutex);
+
+ /* Requests platform resources and installs cam_jpeg_enc_irq. */
+ rc = cam_jpeg_enc_init_soc_resources(&jpeg_enc_dev->soc_info,
+ cam_jpeg_enc_irq,
+ jpeg_enc_dev);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, " failed to init_soc %d", rc);
+ goto error_init_soc;
+ }
+
+ rc = cam_jpeg_enc_register_cpas(&jpeg_enc_dev->soc_info,
+ core_info, jpeg_enc_dev_intf->hw_idx);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, " failed to reg cpas %d", rc);
+ goto error_reg_cpas;
+ }
+ jpeg_enc_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+ mutex_init(&jpeg_enc_dev->hw_mutex);
+ spin_lock_init(&jpeg_enc_dev->hw_lock);
+ init_completion(&jpeg_enc_dev->hw_complete);
+
+ return rc;
+
+error_reg_cpas:
+ cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+error_init_soc:
+ mutex_destroy(&core_info->core_mutex);
+error_match_dev:
+ kfree(jpeg_enc_dev->core_info);
+error_alloc_core:
+ kfree(jpeg_enc_dev);
+error_alloc_dev:
+ kfree(jpeg_enc_dev_intf);
+
+ return rc;
+}
+
+static const struct of_device_id cam_jpeg_enc_dt_match[] = {
+ {
+ .compatible = "qcom,cam_jpeg_enc",
+ .data = &cam_jpeg_enc_hw_info,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cam_jpeg_enc_dt_match);
+
+static struct platform_driver cam_jpeg_enc_driver = {
+ .probe = cam_jpeg_enc_probe,
+ .remove = cam_jpeg_enc_remove,
+ .driver = {
+ .name = "cam-jpeg-enc",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_jpeg_enc_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_jpeg_enc_init_module(void)
+{
+ return platform_driver_register(&cam_jpeg_enc_driver);
+}
+
+static void __exit cam_jpeg_enc_exit_module(void)
+{
+ platform_driver_unregister(&cam_jpeg_enc_driver);
+}
+
+module_init(cam_jpeg_enc_init_module);
+module_exit(cam_jpeg_enc_exit_module);
+MODULE_DESCRIPTION("CAM JPEG_ENC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
new file mode 100644
index 000000000000..647208cccee5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "jpeg_enc_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+/*
+ * cam_jpeg_enc_init_soc_resources() - parse DT properties and request
+ * platform resources (clocks/regulators/IRQ), installing
+ * @jpeg_enc_irq_handler with @irq_data as its cookie.
+ */
+int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t jpeg_enc_irq_handler, void *irq_data)
+{
+ int rc;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc)
+ return rc;
+
+ rc = cam_soc_util_request_platform_resource(soc_info,
+ jpeg_enc_irq_handler,
+ irq_data);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "init soc failed %d", rc);
+
+ return rc;
+}
+
+/*
+ * cam_jpeg_enc_enable_soc_resources() - power up the encoder: enables
+ * regulators, clocks (at CAM_SVS_VOTE level) and the IRQ line.
+ */
+int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc;
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_SVS_VOTE, true);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "enable platform failed %d", rc);
+
+ return rc;
+}
+
+/*
+ * cam_jpeg_enc_disable_soc_resources() - power down: disables clocks,
+ * regulators and the IRQ line (inverse of the enable call above).
+ */
+int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h
new file mode 100644
index 000000000000..56525c4d2ba0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_ENC_SOC_H_
+#define _CAM_JPEG_ENC_SOC_H_
+
+#include "cam_soc_util.h"
+
+/* SOC-layer helpers: DT/resource setup and power up/down of the core. */
+int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t jpeg_enc_irq_handler, void *irq_data);
+
+int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_JPEG_ENC_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/Makefile b/drivers/media/platform/msm/camera_v3/cam_lrme/Makefile
new file mode 100644
index 000000000000..5e46bf2531f8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += lrme_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_dev.o cam_lrme_context.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_context.c
new file mode 100644
index 000000000000..99a8fe1f8eca
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_context.c
@@ -0,0 +1,258 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_debug_util.h"
+#include "cam_lrme_context.h"
+
+static const char lrme_dev_name[] = "lrme";
+
+/*
+ * Acquire handler for the Available state: acquires the HW mapping,
+ * encodes the context index into the upper bits of ctxt_to_hw_map and
+ * moves the context to ACQUIRED.
+ */
+static int __cam_lrme_ctx_acquire_dev_in_available(struct cam_context *ctx,
+ struct cam_acquire_dev_cmd *cmd)
+{
+ int rc = 0;
+ /*
+ * NOTE(review): ctx->ctxt_to_hw_map is sampled here, BEFORE
+ * cam_context_acquire_dev_to_hw() runs.  If that call updates
+ * ctx->ctxt_to_hw_map, the assignment below overwrites it with the
+ * stale value plus the index bits — verify against cam_context_utils.
+ */
+ uintptr_t ctxt_to_hw_map = (uintptr_t)ctx->ctxt_to_hw_map;
+ struct cam_lrme_context *lrme_ctx = ctx->ctx_priv;
+
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+ rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to acquire");
+ return rc;
+ }
+
+ ctxt_to_hw_map |= (lrme_ctx->index << CAM_LRME_CTX_INDEX_SHIFT);
+ ctx->ctxt_to_hw_map = (void *)ctxt_to_hw_map;
+
+ ctx->state = CAM_CTX_ACQUIRED;
+
+ return rc;
+}
+
+/*
+ * Release handler for the Acquired state: releases the HW mapping and
+ * returns the context to AVAILABLE.
+ */
+static int __cam_lrme_ctx_release_dev_in_acquired(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+ rc = cam_context_release_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to release");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_AVAILABLE;
+
+ return rc;
+}
+
+/*
+ * Start handler for the Acquired state: starts the HW and moves the
+ * context to ACTIVATED on success.
+ */
+static int __cam_lrme_ctx_start_dev_in_acquired(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+ rc = cam_context_start_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to start");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_ACTIVATED;
+
+ return rc;
+}
+
+/*
+ * Config handler: forwards the packet to the HW layer.  State is left
+ * unchanged, which is why the state machine also reuses this handler
+ * from the Acquired state.
+ */
+static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx,
+ struct cam_config_dev_cmd *cmd)
+{
+ int rc;
+
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+ rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to config");
+ return rc;
+ }
+
+ return rc;
+}
+
+/* Flush handler: forwards the flush request to the HW layer. */
+static int __cam_lrme_ctx_flush_dev_in_activated(struct cam_context *ctx,
+ struct cam_flush_dev_cmd *cmd)
+{
+ int rc;
+
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+ rc = cam_context_flush_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed to flush device");
+
+ return rc;
+}
+/*
+ * Stop handler for the Activated state: stops the HW and drops the
+ * context back to ACQUIRED.  @cmd is unused (callers may pass NULL).
+ */
+static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+ rc = cam_context_stop_dev_to_hw(ctx);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to stop dev");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_ACQUIRED;
+
+ return rc;
+}
+
+/*
+ * Release handler for the Activated state: first stops the device
+ * (reusing the stop handler), then releases the HW mapping and returns
+ * the context to AVAILABLE.
+ */
+static int __cam_lrme_ctx_release_dev_in_activated(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+ rc = __cam_lrme_ctx_stop_dev_in_activated(ctx, NULL);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to stop");
+ return rc;
+ }
+
+ rc = cam_context_release_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to release");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_AVAILABLE;
+
+ return rc;
+}
+
+/*
+ * IRQ event handler for the Activated state: routes the buf-done event
+ * from the HW manager into the generic context buf-done path.
+ */
+static int __cam_lrme_ctx_handle_irq_in_activated(void *context,
+ uint32_t evt_id, void *evt_data)
+{
+ int rc;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in buf done, rc=%d", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+/*
+ * Top-level context state machine, indexed by cam_context state.
+ * Uninit and Ready expose no ops.  Note the Acquired state installs
+ * the *_in_activated config handler — safe because that handler does
+ * not change ctx->state.
+ */
+static struct cam_ctx_ops
+ cam_lrme_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+ /* Uninit */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Available */
+ {
+ .ioctl_ops = {
+ .acquire_dev = __cam_lrme_ctx_acquire_dev_in_available,
+ },
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Acquired */
+ {
+ .ioctl_ops = {
+ .config_dev = __cam_lrme_ctx_config_dev_in_activated,
+ .release_dev = __cam_lrme_ctx_release_dev_in_acquired,
+ .start_dev = __cam_lrme_ctx_start_dev_in_acquired,
+ },
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Ready */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Activate */
+ {
+ .ioctl_ops = {
+ .config_dev = __cam_lrme_ctx_config_dev_in_activated,
+ .release_dev = __cam_lrme_ctx_release_dev_in_activated,
+ .stop_dev = __cam_lrme_ctx_stop_dev_in_activated,
+ .flush_dev = __cam_lrme_ctx_flush_dev_in_activated,
+ },
+ .crm_ops = {},
+ .irq_ops = __cam_lrme_ctx_handle_irq_in_activated,
+ },
+};
+
+/*
+ * cam_lrme_context_init() - bind an LRME context to a base cam_context.
+ * @lrme_ctx: LRME wrapper to initialize (zeroed first).
+ * @base_ctx: generic context that will carry the state machine.
+ * @hw_intf:  HW manager interface passed to cam_context_init().
+ * @index:    context slot index, later encoded into ctxt_to_hw_map.
+ *
+ * Return: 0 on success, -EINVAL for NULL args, or cam_context_init()'s
+ * error code.
+ */
+int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
+ struct cam_context *base_ctx,
+ struct cam_hw_mgr_intf *hw_intf,
+ uint32_t index)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ if (!base_ctx || !lrme_ctx) {
+ CAM_ERR(CAM_LRME, "Invalid input");
+ return -EINVAL;
+ }
+
+ memset(lrme_ctx, 0, sizeof(*lrme_ctx));
+
+ rc = cam_context_init(base_ctx, lrme_dev_name, CAM_LRME, index,
+ NULL, hw_intf, lrme_ctx->req_base, CAM_CTX_REQ_MAX);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to init context");
+ return rc;
+ }
+ lrme_ctx->base = base_ctx;
+ lrme_ctx->index = index;
+ base_ctx->ctx_priv = lrme_ctx;
+ base_ctx->state_machine = cam_lrme_ctx_state_machine;
+
+ return rc;
+}
+
+/*
+ * cam_lrme_context_deinit() - tear down the base context and zero the
+ * LRME wrapper.  Returns cam_context_deinit()'s result, or -EINVAL if
+ * @lrme_ctx is NULL.
+ */
+int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ if (!lrme_ctx) {
+ CAM_ERR(CAM_LRME, "No ctx to deinit");
+ return -EINVAL;
+ }
+
+ rc = cam_context_deinit(lrme_ctx->base);
+
+ memset(lrme_ctx, 0, sizeof(*lrme_ctx));
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_context.h b/drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_context.h
new file mode 100644
index 000000000000..dc1c8f4c10aa
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_context.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_CONTEXT_H_
+#define _CAM_LRME_CONTEXT_H_
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_sync_api.h"
+
+#define CAM_LRME_CTX_INDEX_SHIFT 16
+
+/**
+ * struct cam_lrme_context
+ *
+ * @base : Base context pointer for this LRME context
+ * @req_base : List of base request for this LRME context
+ * @index : Slot index of this context; encoded into ctxt_to_hw_map
+ *          via CAM_LRME_CTX_INDEX_SHIFT
+ */
+struct cam_lrme_context {
+ struct cam_context *base;
+ struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+ uint64_t index;
+};
+
+/* Init/deinit pair; see cam_lrme_context.c for contracts. */
+int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
+ struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+ uint32_t index);
+int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx);
+
+#endif /* _CAM_LRME_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_dev.c b/drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_dev.c
new file mode 100644
index 000000000000..6b1250aea714
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/cam_lrme_dev.c
@@ -0,0 +1,242 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_lrme_context.h"
+#include "cam_lrme_hw_mgr.h"
+#include "cam_lrme_hw_mgr_intf.h"
+
+#define CAM_LRME_DEV_NAME "cam-lrme"
+
+/**
+ * struct cam_lrme_dev
+ *
+ * @sd : Subdev information
+ * @ctx : List of base contexts
+ * @lrme_ctx : List of LRME contexts
+ * @lock : Mutex for LRME subdev
+ * @open_cnt : Open count of LRME subdev
+ */
+struct cam_lrme_dev {
+ struct cam_subdev sd;
+ struct cam_context ctx[CAM_CTX_MAX];
+ struct cam_lrme_context lrme_ctx[CAM_CTX_MAX];
+ struct mutex lock;
+ uint32_t open_cnt;
+};
+
+static struct cam_lrme_dev *g_lrme_dev;
+
+/*
+ * HW-manager buf-done callback: decodes the context index out of the
+ * ctxt_to_hw_map cookie and dispatches the event to that context's
+ * irq_cb_intf.  Assumes g_lrme_dev is valid (set in probe) and the
+ * decoded index is within CAM_CTX_MAX — no bounds check here.
+ */
+static int cam_lrme_dev_buf_done_cb(void *ctxt_to_hw_map, uint32_t evt_id,
+ void *evt_data)
+{
+ uint64_t index;
+ struct cam_context *ctx;
+ int rc;
+
+ index = CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map);
+ CAM_DBG(CAM_LRME, "ctx index %llu, evt_id %u\n", index, evt_id);
+ ctx = &g_lrme_dev->ctx[index];
+ rc = ctx->irq_cb_intf(ctx, evt_id, evt_data);
+ if (rc)
+ CAM_ERR(CAM_LRME, "irq callback failed");
+
+ return rc;
+}
+
+/*
+ * Subdev open: bumps the open count under the device lock.  Fails with
+ * -ENODEV if the driver has not probed (g_lrme_dev unset).
+ */
+static int cam_lrme_dev_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct cam_lrme_dev *lrme_dev = g_lrme_dev;
+
+ if (!lrme_dev) {
+ CAM_ERR(CAM_LRME,
+ "LRME Dev not initialized, dev=%pK", lrme_dev);
+ return -ENODEV;
+ }
+
+ mutex_lock(&lrme_dev->lock);
+ lrme_dev->open_cnt++;
+ mutex_unlock(&lrme_dev->lock);
+
+ return 0;
+}
+
+/*
+ * Subdev close: drops the open count and shuts the node down when the
+ * last handle closes.
+ */
+static int cam_lrme_dev_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct cam_lrme_dev *lrme_dev = g_lrme_dev;
+ struct cam_node *node = v4l2_get_subdevdata(sd);
+
+ if (!lrme_dev) {
+ CAM_ERR(CAM_LRME, "Invalid args");
+ return -ENODEV;
+ }
+
+ mutex_lock(&lrme_dev->lock);
+ if (lrme_dev->open_cnt <= 0) {
+ CAM_DBG(CAM_LRME, "LRME subdev is already closed");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /*
+ * NOTE(review): the count is decremented before the node NULL check,
+ * so a NULL node still consumes an open reference — confirm intended.
+ */
+ lrme_dev->open_cnt--;
+ if (!node) {
+ CAM_ERR(CAM_LRME, "Node is NULL");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (lrme_dev->open_cnt == 0)
+ cam_node_shutdown(node);
+
+end:
+ mutex_unlock(&lrme_dev->lock);
+ return rc;
+}
+
+static const struct v4l2_subdev_internal_ops cam_lrme_subdev_internal_ops = {
+ .open = cam_lrme_dev_open,
+ .close = cam_lrme_dev_close,
+};
+
+/*
+ * cam_lrme_dev_probe() - platform probe: allocates the singleton
+ * g_lrme_dev, registers the v4l2 subdev, initializes the HW manager,
+ * all CAM_CTX_MAX contexts, and finally the cam_node.
+ */
+static int cam_lrme_dev_probe(struct platform_device *pdev)
+{
+ int rc;
+ int i;
+ struct cam_hw_mgr_intf hw_mgr_intf;
+ struct cam_node *node;
+
+ g_lrme_dev = kzalloc(sizeof(struct cam_lrme_dev), GFP_KERNEL);
+ if (!g_lrme_dev) {
+ CAM_ERR(CAM_LRME, "No memory");
+ return -ENOMEM;
+ }
+ g_lrme_dev->sd.internal_ops = &cam_lrme_subdev_internal_ops;
+
+ mutex_init(&g_lrme_dev->lock);
+
+ rc = cam_subdev_probe(&g_lrme_dev->sd, pdev, CAM_LRME_DEV_NAME,
+ CAM_LRME_DEVICE_TYPE);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME cam_subdev_probe failed");
+ goto free_mem;
+ }
+ node = (struct cam_node *)g_lrme_dev->sd.token;
+
+ rc = cam_lrme_hw_mgr_init(&hw_mgr_intf, cam_lrme_dev_buf_done_cb);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Can not initialized LRME HW manager");
+ goto unregister;
+ }
+
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ rc = cam_lrme_context_init(&g_lrme_dev->lrme_ctx[i],
+ &g_lrme_dev->ctx[i],
+ &node->hw_mgr_intf, i);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME context init failed");
+ goto deinit_ctx;
+ }
+ }
+
+ rc = cam_node_init(node, &hw_mgr_intf, g_lrme_dev->ctx, CAM_CTX_MAX,
+ CAM_LRME_DEV_NAME);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME node init failed");
+ goto deinit_ctx;
+ }
+
+ CAM_DBG(CAM_LRME, "%s probe complete", g_lrme_dev->sd.name);
+
+ return 0;
+
+/*
+ * NOTE(review): error paths below never call cam_lrme_hw_mgr_deinit(),
+ * so a failure after cam_lrme_hw_mgr_init() leaks the HW-manager state
+ * — confirm whether deinit is required here.
+ */
+deinit_ctx:
+ for (--i; i >= 0; i--) {
+ if (cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]))
+ CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i);
+ }
+unregister:
+ if (cam_subdev_remove(&g_lrme_dev->sd))
+ CAM_ERR(CAM_LRME, "Failed in subdev remove");
+free_mem:
+ kfree(g_lrme_dev);
+
+ return rc;
+}
+
+/*
+ * cam_lrme_dev_remove() - undoes probe: contexts, HW manager, subdev,
+ * then frees the singleton.  Assumes g_lrme_dev is non-NULL (remove is
+ * only reachable after a successful probe).
+ */
+static int cam_lrme_dev_remove(struct platform_device *pdev)
+{
+ int i;
+ int rc = 0;
+
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ rc = cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]);
+ if (rc)
+ CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i);
+ }
+
+ rc = cam_lrme_hw_mgr_deinit();
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed in hw mgr deinit, rc=%d", rc);
+
+ rc = cam_subdev_remove(&g_lrme_dev->sd);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Unregister failed");
+
+ mutex_destroy(&g_lrme_dev->lock);
+ kfree(g_lrme_dev);
+ g_lrme_dev = NULL;
+
+ return rc;
+}
+
+static const struct of_device_id cam_lrme_dt_match[] = {
+ {
+ .compatible = "qcom,cam-lrme"
+ },
+ {}
+};
+
+static struct platform_driver cam_lrme_driver = {
+ .probe = cam_lrme_dev_probe,
+ .remove = cam_lrme_dev_remove,
+ .driver = {
+ .name = "cam_lrme",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_lrme_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_lrme_dev_init_module(void)
+{
+ return platform_driver_register(&cam_lrme_driver);
+}
+
+static void __exit cam_lrme_dev_exit_module(void)
+{
+ platform_driver_unregister(&cam_lrme_driver);
+}
+
+module_init(cam_lrme_dev_init_module);
+module_exit(cam_lrme_dev_exit_module);
+MODULE_DESCRIPTION("MSM LRME driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/Makefile b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/Makefile
new file mode 100644
index 000000000000..a048afece9db
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += lrme_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
new file mode 100644
index 000000000000..0c236462b7f5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -0,0 +1,1150 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+#include "cam_packet_util.h"
+#include "cam_lrme_context.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_lrme_hw_mgr_intf.h"
+#include "cam_lrme_hw_mgr.h"
+
+static struct cam_lrme_hw_mgr g_lrme_hw_mgr;
+
+/*
+ * cam_lrme_mgr_util_reserve_device() - pick the least-loaded LRME device
+ * for a new context and bump its context refcount.
+ *
+ * Scans registered devices under hw_mgr_mutex: a device with no contexts
+ * wins immediately, otherwise the one with the fewest contexts is chosen.
+ * @lrme_acquire_args is currently unused in the selection.
+ *
+ * Returns the reserved device index, or -EINVAL if no device registered.
+ */
+static int cam_lrme_mgr_util_reserve_device(struct cam_lrme_hw_mgr *hw_mgr,
+	struct cam_lrme_acquire_args *lrme_acquire_args)
+{
+	int i, index = 0;
+	uint32_t min_ctx = UINT_MAX;
+	struct cam_lrme_device *hw_device = NULL;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (!hw_mgr->device_count) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_LRME, "No device is registered");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < hw_mgr->device_count && i < CAM_LRME_HW_MAX; i++) {
+		hw_device = &hw_mgr->hw_device[i];
+		/* Idle device: take it without scanning further. */
+		if (!hw_device->num_context) {
+			index = i;
+			break;
+		}
+		if (hw_device->num_context < min_ctx) {
+			min_ctx = hw_device->num_context;
+			index = i;
+		}
+	}
+
+	hw_device = &hw_mgr->hw_device[index];
+	hw_device->num_context++;
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_DBG(CAM_LRME, "reserve device index %d", index);
+
+	return index;
+}
+
+/*
+ * cam_lrme_mgr_util_get_device() - resolve a device index to its
+ * cam_lrme_device entry after validating the manager pointer and the
+ * index range. Returns 0 on success, -EINVAL on bad input.
+ */
+static int cam_lrme_mgr_util_get_device(struct cam_lrme_hw_mgr *hw_mgr,
+	uint32_t device_index, struct cam_lrme_device **hw_device)
+{
+	struct cam_lrme_device *dev;
+
+	if (!hw_mgr) {
+		CAM_ERR(CAM_LRME, "invalid params hw_mgr %pK", hw_mgr);
+		return -EINVAL;
+	}
+	if (device_index >= CAM_LRME_HW_MAX) {
+		CAM_ERR(CAM_LRME, "Wrong device index %d", device_index);
+		return -EINVAL;
+	}
+
+	dev = &hw_mgr->hw_device[device_index];
+	*hw_device = dev;
+
+	return 0;
+}
+
+/*
+ * cam_lrme_mgr_util_packet_validate() - sanity-check a userspace packet.
+ *
+ * Validates the packet header/offsets via cam_packet_util_validate_packet(),
+ * requires at least one io config, and validates every non-empty command
+ * buffer descriptor. Returns 0 on success or a negative errno.
+ */
+static int cam_lrme_mgr_util_packet_validate(struct cam_packet *packet)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc;
+
+	if (!packet) {
+		CAM_ERR(CAM_LRME, "Invalid args");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_LRME, "Packet request=%d, op_code=0x%x, size=%d, flags=%d",
+		packet->header.request_id, packet->header.op_code,
+		packet->header.size, packet->header.flags);
+	CAM_DBG(CAM_LRME,
+		"Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)",
+		packet->cmd_buf_offset, packet->num_cmd_buf,
+		packet->io_configs_offset, packet->num_io_configs);
+	CAM_DBG(CAM_LRME,
+		"Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)",
+		packet->patch_offset, packet->num_patches,
+		packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index);
+
+	if (cam_packet_util_validate_packet(packet)) {
+		CAM_ERR(CAM_LRME, "invalid packet:%d %d %d %d %d",
+			packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	/* LRME always needs io configs to describe input/output buffers. */
+	if (!packet->num_io_configs) {
+		CAM_ERR(CAM_LRME, "no io configs");
+		return -EINVAL;
+	}
+
+	/* Command descriptors live inside the packet payload at this offset. */
+	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		/* Zero-length descriptors are placeholders; skip them. */
+		if (!cmd_desc[i].length)
+			continue;
+
+		CAM_DBG(CAM_LRME,
+			"CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d",
+			i,
+			cmd_desc[i].mem_handle, cmd_desc[i].offset,
+			cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type,
+			cmd_desc[i].meta_data);
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Invalid cmd buffer %d", i);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * cam_lrme_mgr_util_prepare_io_buffer() - translate packet io configs into
+ * mapped input/output buffers and sync map entries.
+ *
+ * For each io config, maps every plane's mem handle through @iommu_hdl and
+ * records the resulting addresses into @input_buf/@output_buf (each of
+ * capacity @io_buf_size entries) based on buffer direction. Also fills
+ * prepare->in/out_map_entries for fence handling.
+ *
+ * Returns 0 on success, -EINVAL on bad direction/overflow, -ENOMEM if a
+ * plane cannot be mapped.
+ */
+static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_lrme_hw_io_buffer *input_buf,
+	struct cam_lrme_hw_io_buffer *output_buf, uint32_t io_buf_size)
+{
+	int rc = -EINVAL;
+	uint32_t num_in_buf, num_out_buf, i, j, plane;
+	struct cam_buf_io_cfg *io_cfg;
+	uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+	size_t size;
+
+	num_in_buf = 0;
+	num_out_buf = 0;
+	io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)
+		&prepare->packet->payload +
+		prepare->packet->io_configs_offset);
+
+	for (i = 0; i < prepare->packet->num_io_configs; i++) {
+		CAM_DBG(CAM_LRME,
+			"IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]",
+			i, io_cfg[i].mem_handle[0], io_cfg[i].direction,
+			io_cfg[i].resource_type,
+			io_cfg[i].fence, io_cfg[i].format);
+
+		/*
+		 * Bail out before either array index reaches capacity;
+		 * the original ">" check allowed one write past the end
+		 * of input_buf/output_buf when a counter equalled
+		 * io_buf_size.
+		 */
+		if ((num_in_buf >= io_buf_size) ||
+			(num_out_buf >= io_buf_size)) {
+			CAM_ERR(CAM_LRME, "Invalid number of buffers %d %d %d",
+				num_in_buf, num_out_buf, io_buf_size);
+			return -EINVAL;
+		}
+
+		memset(io_addr, 0, sizeof(io_addr));
+		for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
+			/* A zero handle marks the end of the plane list. */
+			if (!io_cfg[i].mem_handle[plane])
+				break;
+
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane],
+				iommu_hdl, &io_addr[plane], &size);
+			if (rc) {
+				CAM_ERR(CAM_LRME, "Cannot get io buf for %d %d",
+					plane, rc);
+				return -ENOMEM;
+			}
+
+			io_addr[plane] += io_cfg[i].offsets[plane];
+
+			CAM_DBG(CAM_LRME, "IO Address[%d][%d] : %llu",
+				io_cfg[i].direction, plane, io_addr[plane]);
+		}
+
+		switch (io_cfg[i].direction) {
+		case CAM_BUF_INPUT: {
+			prepare->in_map_entries[num_in_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->in_map_entries[num_in_buf].sync_id =
+				io_cfg[i].fence;
+
+			input_buf[num_in_buf].valid = true;
+			for (j = 0; j < plane; j++)
+				input_buf[num_in_buf].io_addr[j] = io_addr[j];
+			input_buf[num_in_buf].num_plane = plane;
+			input_buf[num_in_buf].io_cfg = &io_cfg[i];
+
+			num_in_buf++;
+			break;
+		}
+		case CAM_BUF_OUTPUT: {
+			prepare->out_map_entries[num_out_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->out_map_entries[num_out_buf].sync_id =
+				io_cfg[i].fence;
+
+			output_buf[num_out_buf].valid = true;
+			for (j = 0; j < plane; j++)
+				output_buf[num_out_buf].io_addr[j] = io_addr[j];
+			output_buf[num_out_buf].num_plane = plane;
+			output_buf[num_out_buf].io_cfg = &io_cfg[i];
+
+			num_out_buf++;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_LRME, "Unsupported io direction %d",
+				io_cfg[i].direction);
+			return -EINVAL;
+		}
+	}
+	prepare->num_in_map_entries = num_in_buf;
+	prepare->num_out_map_entries = num_out_buf;
+
+	return 0;
+}
+
+/*
+ * cam_lrme_mgr_util_prepare_hw_update_entries() - build the list of HW
+ * update entries for a request.
+ *
+ * Asks the HW layer (CAM_LRME_HW_CMD_PREPARE_HW_UPDATE) to generate its
+ * config commands into the free tail of the KMD buffer, then appends one
+ * entry for that generated config plus one entry per non-empty user command
+ * buffer. Advances kmd_buf_info->used_bytes/offset by the consumed size.
+ *
+ * Returns 0 on success, -EINVAL/-ENOMEM on validation or space failures.
+ */
+static int cam_lrme_mgr_util_prepare_hw_update_entries(
+	struct cam_lrme_hw_mgr *hw_mgr,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_lrme_hw_cmd_config_args *config_args,
+	struct cam_kmd_buf_info *kmd_buf_info)
+{
+	int i, rc = 0;
+	struct cam_lrme_device *hw_device = NULL;
+	uint32_t *kmd_buf_addr;
+	uint32_t num_entry;
+	uint32_t kmd_buf_max_size;
+	uint32_t kmd_buf_used_bytes = 0;
+	struct cam_hw_update_entry *hw_entry;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+
+	hw_device = config_args->hw_device;
+	if (!hw_device) {
+		CAM_ERR(CAM_LRME, "Invalid hw_device");
+		return -EINVAL;
+	}
+
+	/* Point the HW layer at the unused tail of the KMD buffer. */
+	kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr +
+		kmd_buf_info->used_bytes);
+	kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes;
+
+	config_args->cmd_buf_addr = kmd_buf_addr;
+	config_args->size = kmd_buf_max_size;
+	config_args->config_buf_size = 0;
+
+	if (hw_device->hw_intf.hw_ops.process_cmd) {
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
+			config_args,
+			sizeof(struct cam_lrme_hw_cmd_config_args));
+		if (rc) {
+			CAM_ERR(CAM_LRME,
+				"Failed in CMD_PREPARE_HW_UPDATE %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_LRME, "Can't find handle function");
+		return -EINVAL;
+	}
+
+	kmd_buf_used_bytes += config_args->config_buf_size;
+
+	/* HW layer must have produced something that fits in the buffer. */
+	if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) {
+		CAM_ERR(CAM_LRME, "Invalid kmd used bytes %d (%d)",
+			kmd_buf_used_bytes, kmd_buf_max_size);
+		return -ENOMEM;
+	}
+
+	hw_entry = prepare->hw_update_entries;
+	num_entry = 0;
+
+	if (config_args->config_buf_size) {
+		if ((num_entry + 1) >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_LRME, "Insufficient HW entries :%d %d",
+				num_entry, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_entry].handle = kmd_buf_info->handle;
+		hw_entry[num_entry].len = config_args->config_buf_size;
+		hw_entry[num_entry].offset = kmd_buf_info->offset;
+
+		/* Account for the generated config in the shared KMD buffer. */
+		kmd_buf_info->used_bytes += config_args->config_buf_size;
+		kmd_buf_info->offset += config_args->config_buf_size;
+		num_entry++;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)
+		&prepare->packet->payload + prepare->packet->cmd_buf_offset);
+
+	/* Append one entry per non-empty user command buffer. */
+	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if ((num_entry + 1) >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_LRME, "Exceed max num of entry");
+			return -EINVAL;
+		}
+
+		hw_entry[num_entry].handle = cmd_desc[i].mem_handle;
+		hw_entry[num_entry].len = cmd_desc[i].length;
+		hw_entry[num_entry].offset = cmd_desc[i].offset;
+		num_entry++;
+	}
+	prepare->num_hw_update_entries = num_entry;
+
+	CAM_DBG(CAM_LRME, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)",
+		prepare->num_hw_update_entries, prepare->num_in_map_entries,
+		prepare->num_out_map_entries);
+
+	return rc;
+}
+
+/* Append @entry to @queue while holding @queue_lock. */
+static void cam_lrme_mgr_util_put_frame_req(
+	struct list_head *queue,
+	struct list_head *entry,
+	spinlock_t *queue_lock)
+{
+	spin_lock(queue_lock);
+	list_add_tail(entry, queue);
+	spin_unlock(queue_lock);
+}
+
+/*
+ * cam_lrme_mgr_util_get_frame_req() - pop the head of a request list.
+ *
+ * Under @lock, detaches the first frame request from @src_list and returns
+ * it through @frame_req (NULL if the list was empty). Returns 0 on success,
+ * -ENOENT when the list is empty.
+ */
+static int cam_lrme_mgr_util_get_frame_req(
+	struct list_head *src_list,
+	struct cam_lrme_frame_request **frame_req,
+	spinlock_t *lock)
+{
+	int rc = 0;
+	struct cam_lrme_frame_request *req_ptr = NULL;
+
+	spin_lock(lock);
+	if (!list_empty(src_list)) {
+		req_ptr = list_first_entry(src_list,
+			struct cam_lrme_frame_request, frame_list);
+		/* Re-init so callers can safely test list membership later. */
+		list_del_init(&req_ptr->frame_list);
+	} else {
+		rc = -ENOENT;
+	}
+	*frame_req = req_ptr;
+	spin_unlock(lock);
+
+	return rc;
+}
+
+
+/*
+ * cam_lrme_mgr_util_submit_req() - workqueue callback that submits one
+ * pending frame request to the hardware.
+ *
+ * Dequeues from the high-priority pending list first, falling back to the
+ * normal list (req_prio tracks which list the request came from). On any
+ * submit failure the request is pushed back to the FRONT of its original
+ * list to preserve ordering; -EBUSY is treated as a benign retry and
+ * converted to 0.
+ *
+ * @priv is the hw mgr, @data the per-task cam_lrme_mgr_work_data.
+ */
+static int cam_lrme_mgr_util_submit_req(void *priv, void *data)
+{
+	struct cam_lrme_device *hw_device;
+	struct cam_lrme_hw_mgr *hw_mgr;
+	struct cam_lrme_frame_request *frame_req = NULL;
+	struct cam_lrme_hw_submit_args submit_args;
+	struct cam_lrme_mgr_work_data *work_data;
+	int rc;
+	int req_prio = 0;
+
+	if (!priv) {
+		CAM_ERR(CAM_LRME, "worker doesn't have private data");
+		return -EINVAL;
+	}
+
+	hw_mgr = (struct cam_lrme_hw_mgr *)priv;
+	work_data = (struct cam_lrme_mgr_work_data *)data;
+	hw_device = work_data->hw_device;
+
+	/* High-priority requests are drained before normal ones. */
+	rc = cam_lrme_mgr_util_get_frame_req(
+		&hw_device->frame_pending_list_high, &frame_req,
+		&hw_device->high_req_lock);
+
+	if (!frame_req) {
+		rc = cam_lrme_mgr_util_get_frame_req(
+			&hw_device->frame_pending_list_normal, &frame_req,
+			&hw_device->normal_req_lock);
+		if (frame_req)
+			req_prio = 1;
+	}
+
+	if (!frame_req) {
+		CAM_DBG(CAM_LRME, "No pending request");
+		return 0;
+	}
+
+	if (hw_device->hw_intf.hw_ops.process_cmd) {
+		submit_args.hw_update_entries = frame_req->hw_update_entries;
+		submit_args.num_hw_update_entries =
+			frame_req->num_hw_update_entries;
+		submit_args.frame_req = frame_req;
+
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_SUBMIT,
+			&submit_args, sizeof(struct cam_lrme_hw_submit_args));
+
+		if (rc == -EBUSY)
+			CAM_DBG(CAM_LRME, "device busy");
+		else if (rc)
+			CAM_ERR(CAM_LRME, "submit request failed rc %d", rc);
+		if (rc) {
+			/*
+			 * Requeue at the head of the list it came from so
+			 * ordering is kept; the lock is chosen by priority.
+			 */
+			req_prio == 0 ? spin_lock(&hw_device->high_req_lock) :
+				spin_lock(&hw_device->normal_req_lock);
+			list_add(&frame_req->frame_list,
+				(req_prio == 0 ?
+				 &hw_device->frame_pending_list_high :
+				 &hw_device->frame_pending_list_normal));
+			req_prio == 0 ? spin_unlock(&hw_device->high_req_lock) :
+				spin_unlock(&hw_device->normal_req_lock);
+		}
+		/* -EBUSY just means "retry later"; not an error for CRM. */
+		if (rc == -EBUSY)
+			rc = 0;
+	} else {
+		/* No submit op: put the request back and report failure. */
+		req_prio == 0 ? spin_lock(&hw_device->high_req_lock) :
+			spin_lock(&hw_device->normal_req_lock);
+		list_add(&frame_req->frame_list,
+			(req_prio == 0 ?
+			 &hw_device->frame_pending_list_high :
+			 &hw_device->frame_pending_list_normal));
+		req_prio == 0 ? spin_unlock(&hw_device->high_req_lock) :
+			spin_unlock(&hw_device->normal_req_lock);
+		rc = -EINVAL;
+	}
+
+	CAM_DBG(CAM_LRME, "End of submit, rc %d", rc);
+
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_util_schedule_frame_req() - queue a submit task on the
+ * device's worker so cam_lrme_mgr_util_submit_req() runs asynchronously.
+ *
+ * Returns 0 on success, -ENOMEM if no workq task is available, or the
+ * enqueue error.
+ */
+static int cam_lrme_mgr_util_schedule_frame_req(
+	struct cam_lrme_hw_mgr *hw_mgr, struct cam_lrme_device *hw_device)
+{
+	int rc = 0;
+	struct crm_workq_task *task;
+	struct cam_lrme_mgr_work_data *work_data;
+
+	task = cam_req_mgr_workq_get_task(hw_device->work);
+	if (!task) {
+		CAM_ERR(CAM_LRME, "Can not get task for worker");
+		return -ENOMEM;
+	}
+
+	/* task->payload was pre-wired to the per-task work_data slot. */
+	work_data = (struct cam_lrme_mgr_work_data *)task->payload;
+	work_data->hw_device = hw_device;
+
+	task->process_cb = cam_lrme_mgr_util_submit_req;
+	CAM_DBG(CAM_LRME, "enqueue submit task");
+	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_util_release() - drop one context reference from a device,
+ * undoing cam_lrme_mgr_util_reserve_device(). Returns 0 or a lookup error.
+ */
+static int cam_lrme_mgr_util_release(struct cam_lrme_hw_mgr *hw_mgr,
+	uint32_t device_index)
+{
+	int rc = 0;
+	struct cam_lrme_device *hw_device;
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	hw_device->num_context--;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_cb() - callback from the LRME HW layer on frame events.
+ *
+ * @data is the cam_lrme_device that raised the event; cb_args->cb_type is
+ * a bitmask that is consumed (cleared) as each event is handled:
+ *  - PUT_FRAME: recycle the request to the free list without notifying.
+ *  - COMP_REG_UPDATE: informational only.
+ *  - BUF_DONE / ERROR: notify the context via event_cb (ERROR aborts the
+ *    frame), recycle the request, then schedule the next pending one.
+ */
+static int cam_lrme_mgr_cb(void *data,
+	struct cam_lrme_hw_cb_args *cb_args)
+{
+	struct cam_lrme_hw_mgr *hw_mgr = &g_lrme_hw_mgr;
+	int rc = 0;
+	bool frame_abort = true;
+	struct cam_lrme_frame_request *frame_req;
+	struct cam_lrme_device *hw_device;
+
+	if (!data || !cb_args) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	hw_device = (struct cam_lrme_device *)data;
+	frame_req = cb_args->frame_req;
+
+	if (cb_args->cb_type & CAM_LRME_CB_PUT_FRAME) {
+		/* Scrub and return the request to the free pool. */
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		INIT_LIST_HEAD(&frame_req->frame_list);
+		cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+				&frame_req->frame_list,
+				&hw_mgr->free_req_lock);
+		cb_args->cb_type &= ~CAM_LRME_CB_PUT_FRAME;
+		frame_req = NULL;
+	}
+
+	if (cb_args->cb_type & CAM_LRME_CB_COMP_REG_UPDATE) {
+		cb_args->cb_type &= ~CAM_LRME_CB_COMP_REG_UPDATE;
+		CAM_DBG(CAM_LRME, "Reg update");
+	}
+
+	/* Nothing left to report if the frame was already recycled. */
+	if (!frame_req)
+		return rc;
+
+	if (cb_args->cb_type & CAM_LRME_CB_BUF_DONE) {
+		cb_args->cb_type &= ~CAM_LRME_CB_BUF_DONE;
+		frame_abort = false;
+	} else if (cb_args->cb_type & CAM_LRME_CB_ERROR) {
+		cb_args->cb_type &= ~CAM_LRME_CB_ERROR;
+		frame_abort = true;
+	} else {
+		CAM_ERR(CAM_LRME, "Wrong cb type %d, req %lld",
+			cb_args->cb_type, frame_req->req_id);
+		return -EINVAL;
+	}
+
+	if (hw_mgr->event_cb) {
+		struct cam_hw_done_event_data buf_data;
+
+		buf_data.request_id = frame_req->req_id;
+		CAM_DBG(CAM_LRME, "frame req %llu, frame_abort %d",
+			frame_req->req_id, frame_abort);
+		rc = hw_mgr->event_cb(frame_req->ctxt_to_hw_map,
+			frame_abort, &buf_data);
+	} else {
+		CAM_ERR(CAM_LRME, "No cb function");
+	}
+	memset(frame_req, 0x0, sizeof(*frame_req));
+	INIT_LIST_HEAD(&frame_req->frame_list);
+	cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+				&frame_req->frame_list,
+				&hw_mgr->free_req_lock);
+
+	/* Device is idle now; kick the next pending request, if any. */
+	rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device);
+
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_get_caps() - copy cached LRME capabilities to userspace.
+ *
+ * Validates that userspace sized its buffer for cam_lrme_query_cap_cmd,
+ * then copies hw_mgr->lrme_caps to args->caps_handle.
+ * Returns 0 on success, -EFAULT on size mismatch or copy failure.
+ */
+static int cam_lrme_mgr_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *args = hw_get_caps_args;
+
+	if (sizeof(struct cam_lrme_query_cap_cmd) != args->size) {
+		/*
+		 * Log the size actually compared; the original message
+		 * printed sizeof(struct cam_query_cap_cmd), which is a
+		 * different struct and made the diagnostic misleading.
+		 */
+		CAM_ERR(CAM_LRME,
+			"sizeof(struct cam_lrme_query_cap_cmd) = %zu, args->size = %d",
+			sizeof(struct cam_lrme_query_cap_cmd), args->size);
+		return -EFAULT;
+	}
+
+	if (copy_to_user(u64_to_user_ptr(args->caps_handle),
+		&(hw_mgr->lrme_caps),
+		sizeof(struct cam_lrme_query_cap_cmd))) {
+		CAM_ERR(CAM_LRME, "copy to user failed");
+		return -EFAULT;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_hw_acquire() - acquire an LRME device for a new context.
+ *
+ * Copies the acquire args from userspace, reserves the least-loaded
+ * device, and encodes the device index into args->ctxt_to_hw_map.
+ * Returns 0 on success, -EFAULT/-EINVAL on failure.
+ */
+static int cam_lrme_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
+{
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_acquire_args *args =
+		(struct cam_hw_acquire_args *)hw_acquire_args;
+	struct cam_lrme_acquire_args lrme_acquire_args;
+	uintptr_t device_index;
+
+	if (!hw_mgr_priv || !args) {
+		CAM_ERR(CAM_LRME,
+			"Invalid input params hw_mgr_priv %pK, acquire_args %pK",
+			hw_mgr_priv, args);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&lrme_acquire_args,
+		(void __user *)args->acquire_info,
+		sizeof(struct cam_lrme_acquire_args))) {
+		CAM_ERR(CAM_LRME, "Failed to copy acquire args from user");
+		return -EFAULT;
+	}
+
+	/*
+	 * A negative errno from reserve becomes a huge uintptr_t here and
+	 * is rejected by the range check below.
+	 */
+	device_index = cam_lrme_mgr_util_reserve_device(hw_mgr,
+		&lrme_acquire_args);
+	CAM_DBG(CAM_LRME, "Get device id %llu", device_index);
+
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Get wrong device id %lu", device_index);
+		return -EINVAL;
+	}
+
+	/* device_index is the right 4 bit in ctxt_to_hw_map */
+	args->ctxt_to_hw_map = (void *)device_index;
+
+	return 0;
+}
+
+/*
+ * cam_lrme_mgr_hw_release() - release a context's device reservation.
+ *
+ * Decodes the device index from ctxt_to_hw_map and drops the context
+ * refcount taken at acquire. Returns 0, -EINVAL, or -EPERM on bad index.
+ */
+static int cam_lrme_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_release_args *args =
+		(struct cam_hw_release_args *)hw_release_args;
+	uint64_t device_index;
+
+	if (!hw_mgr_priv || !hw_release_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments %pK, %pK",
+			hw_mgr_priv, hw_release_args);
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %llu", device_index);
+		return -EPERM;
+	}
+
+	rc = cam_lrme_mgr_util_release(hw_mgr, device_index);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed in release device, rc=%d", rc);
+
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_hw_flush() - flush a context's pending and active requests.
+ *
+ * Pending requests are recycled straight back to the free list. Active
+ * requests still linked on a pending list are recycled under the
+ * priority-appropriate lock; an active request already submitted to HW
+ * (empty frame_list) is flushed via the device's flush op.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int cam_lrme_mgr_hw_flush(void *hw_mgr_priv, void *hw_flush_args)
+{
+	/* Opening brace moved to its own line per kernel coding style. */
+	int rc = 0, i;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_flush_args *args;
+	struct cam_lrme_device *hw_device;
+	struct cam_lrme_frame_request *frame_req = NULL, *req_to_flush = NULL;
+	struct cam_lrme_frame_request **req_list = NULL;
+	uint32_t device_index;
+	struct cam_lrme_hw_flush_args lrme_flush_args;
+	uint32_t priority;
+
+	if (!hw_mgr_priv || !hw_flush_args) {
+		CAM_ERR(CAM_LRME, "Invalid args %pK %pK",
+			hw_mgr_priv, hw_flush_args);
+		return -EINVAL;
+	}
+
+	args = (struct cam_hw_flush_args *)hw_flush_args;
+	/* Use the shared decode macro instead of open-coding "& 0xF". */
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+		goto end;
+	}
+
+	req_list = (struct cam_lrme_frame_request **)args->flush_req_pending;
+	for (i = 0; i < args->num_req_pending; i++) {
+		frame_req = req_list[i];
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+			&frame_req->frame_list, &hw_mgr->free_req_lock);
+	}
+
+	req_list = (struct cam_lrme_frame_request **)args->flush_req_active;
+	for (i = 0; i < args->num_req_active; i++) {
+		frame_req = req_list[i];
+		priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map);
+		spin_lock((priority == CAM_LRME_PRIORITY_HIGH) ?
+			&hw_device->high_req_lock :
+			&hw_device->normal_req_lock);
+		if (!list_empty(&frame_req->frame_list)) {
+			/* Still queued, not on HW: recycle immediately. */
+			list_del_init(&frame_req->frame_list);
+			cam_lrme_mgr_util_put_frame_req(
+				&hw_mgr->frame_free_list,
+				&frame_req->frame_list,
+				&hw_mgr->free_req_lock);
+		} else
+			req_to_flush = frame_req;
+		spin_unlock((priority == CAM_LRME_PRIORITY_HIGH) ?
+			&hw_device->high_req_lock :
+			&hw_device->normal_req_lock);
+	}
+	if (!req_to_flush)
+		goto end;
+	if (hw_device->hw_intf.hw_ops.flush) {
+		lrme_flush_args.ctxt_to_hw_map = req_to_flush->ctxt_to_hw_map;
+		lrme_flush_args.flush_type = args->flush_type;
+		lrme_flush_args.req_to_flush = req_to_flush;
+		rc = hw_device->hw_intf.hw_ops.flush(hw_device->hw_intf.hw_priv,
+			&lrme_flush_args,
+			sizeof(lrme_flush_args));
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed in HW Stop %d", rc);
+			goto end;
+		}
+	} else {
+		CAM_ERR(CAM_LRME, "No stop ops");
+		goto end;
+	}
+
+end:
+	return rc;
+}
+
+
+/*
+ * cam_lrme_mgr_hw_start() - start the LRME device backing a context,
+ * then push the debugfs dump_register flag to the HW layer.
+ *
+ * Returns 0 on success, -EPERM/-EINVAL on bad index or missing start op.
+ */
+static int cam_lrme_mgr_hw_start(void *hw_mgr_priv, void *hw_start_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_start_args *args =
+		(struct cam_hw_start_args *)hw_start_args;
+	struct cam_lrme_device *hw_device;
+	uint32_t device_index;
+
+	if (!hw_mgr || !args) {
+		CAM_ERR(CAM_LRME, "Invalid input params");
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_LRME, "Start device index %d", device_index);
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw device");
+		return rc;
+	}
+
+	if (hw_device->hw_intf.hw_ops.start) {
+		rc = hw_device->hw_intf.hw_ops.start(
+			hw_device->hw_intf.hw_priv, NULL, 0);
+	} else {
+		CAM_ERR(CAM_LRME, "Invalid start function");
+		return -EINVAL;
+	}
+
+	/*
+	 * process_cmd is optional for a device (other call sites guard it);
+	 * the original called it unconditionally and would NULL-deref here.
+	 */
+	if (hw_device->hw_intf.hw_ops.process_cmd)
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_DUMP_REGISTER,
+			&g_lrme_hw_mgr.debugfs_entry.dump_register,
+			sizeof(bool));
+
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_hw_stop() - stop the LRME device backing a context.
+ *
+ * The stop op is optional; absence is not treated as an error.
+ * Returns 0 on success, -EINVAL/-EPERM on bad args, or the HW stop error.
+ */
+static int cam_lrme_mgr_hw_stop(void *hw_mgr_priv, void *stop_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_stop_args *args =
+		(struct cam_hw_stop_args *)stop_args;
+	struct cam_lrme_device *hw_device;
+	uint32_t device_index;
+
+	if (!hw_mgr_priv || !stop_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_LRME, "Stop device index %d", device_index);
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw device");
+		return rc;
+	}
+
+	if (hw_device->hw_intf.hw_ops.stop) {
+		rc = hw_device->hw_intf.hw_ops.stop(
+			hw_device->hw_intf.hw_priv, NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed in HW stop %d", rc);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_hw_prepare_update() - validate a packet and build the
+ * per-frame request for later hw_config.
+ *
+ * Pipeline: validate packet -> get KMD buffer -> apply patches -> map io
+ * buffers -> build hw update entries -> grab a free frame request and
+ * stash the entries in it. The frame request is returned to the caller
+ * through args->priv.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int cam_lrme_mgr_hw_prepare_update(void *hw_mgr_priv,
+	void *hw_prepare_update_args)
+{
+	int rc = 0, i;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_prepare_update_args *args =
+		(struct cam_hw_prepare_update_args *)hw_prepare_update_args;
+	struct cam_lrme_device *hw_device;
+	struct cam_kmd_buf_info kmd_buf;
+	struct cam_lrme_hw_cmd_config_args config_args;
+	struct cam_lrme_frame_request *frame_req = NULL;
+	uint32_t device_index;
+
+	if (!hw_mgr_priv || !hw_prepare_update_args) {
+		CAM_ERR(CAM_LRME, "Invalid args %pK %pK",
+			hw_mgr_priv, hw_prepare_update_args);
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_packet_validate(args->packet);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in packet validation %d", rc);
+		goto error;
+	}
+
+	rc = cam_packet_util_get_kmd_buffer(args->packet, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in get kmd buf buffer %d", rc);
+		goto error;
+	}
+
+	CAM_DBG(CAM_LRME,
+		"KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d",
+		kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset,
+		kmd_buf.size, kmd_buf.used_bytes);
+
+	/* Patch device addresses into the command buffers. */
+	rc = cam_packet_util_process_patches(args->packet,
+		hw_mgr->device_iommu.non_secure, hw_mgr->device_iommu.secure);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Patch packet failed, rc=%d", rc);
+		return rc;
+	}
+
+	memset(&config_args, 0, sizeof(config_args));
+	config_args.hw_device = hw_device;
+
+	rc = cam_lrme_mgr_util_prepare_io_buffer(
+		hw_mgr->device_iommu.non_secure, args,
+		config_args.input_buf, config_args.output_buf,
+		CAM_LRME_MAX_IO_BUFFER);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in prepare IO Buf %d", rc);
+		goto error;
+	}
+	/* Check port number */
+	if (args->num_in_map_entries == 0 || args->num_out_map_entries == 0) {
+		CAM_ERR(CAM_LRME, "Error in port number in %d, out %d",
+			args->num_in_map_entries, args->num_out_map_entries);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_prepare_hw_update_entries(hw_mgr, args,
+		&config_args, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in hw update entries %d", rc);
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_get_frame_req(&hw_mgr->frame_free_list,
+		&frame_req, &hw_mgr->free_req_lock);
+	if (rc || !frame_req) {
+		CAM_ERR(CAM_LRME, "Can not get free frame request");
+		goto error;
+	}
+
+	/* Snapshot the prepared entries into the request for hw_config. */
+	frame_req->ctxt_to_hw_map = args->ctxt_to_hw_map;
+	frame_req->req_id = args->packet->header.request_id;
+	frame_req->hw_device = hw_device;
+	frame_req->num_hw_update_entries = args->num_hw_update_entries;
+	for (i = 0; i < args->num_hw_update_entries; i++)
+		frame_req->hw_update_entries[i] = args->hw_update_entries[i];
+
+	args->priv = frame_req;
+
+	CAM_DBG(CAM_LRME, "FramePrepare : Frame[%lld]", frame_req->req_id);
+
+	return 0;
+
+error:
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_hw_config() - queue a prepared frame request for execution.
+ *
+ * Takes the frame request produced by hw_prepare_update (args->priv),
+ * appends it to the device's high or normal pending list based on the
+ * priority encoded in ctxt_to_hw_map, and schedules the submit worker.
+ *
+ * Returns 0 on success or -EINVAL on missing/invalid state.
+ */
+static int cam_lrme_mgr_hw_config(void *hw_mgr_priv,
+	void *hw_config_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *args =
+		(struct cam_hw_config_args *)hw_config_args;
+	struct cam_lrme_frame_request *frame_req;
+	struct cam_lrme_device *hw_device = NULL;
+	enum cam_lrme_hw_mgr_ctx_priority priority;
+
+	if (!hw_mgr_priv || !hw_config_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments, hw_mgr %pK, config %pK",
+			hw_mgr_priv, hw_config_args);
+		return -EINVAL;
+	}
+
+	if (!args->num_hw_update_entries) {
+		CAM_ERR(CAM_LRME, "No hw update entries");
+		return -EINVAL;
+	}
+
+	frame_req = (struct cam_lrme_frame_request *)args->priv;
+	if (!frame_req) {
+		CAM_ERR(CAM_LRME, "No frame request");
+		return -EINVAL;
+	}
+
+	hw_device = frame_req->hw_device;
+	if (!hw_device)
+		return -EINVAL;
+
+	priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map);
+	if (priority == CAM_LRME_PRIORITY_HIGH) {
+		cam_lrme_mgr_util_put_frame_req(
+			&hw_device->frame_pending_list_high,
+			&frame_req->frame_list, &hw_device->high_req_lock);
+	} else {
+		cam_lrme_mgr_util_put_frame_req(
+			&hw_device->frame_pending_list_normal,
+			&frame_req->frame_list, &hw_device->normal_req_lock);
+	}
+
+	CAM_DBG(CAM_LRME, "schedule req %llu", frame_req->req_id);
+	rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device);
+
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_create_debugfs_entry() - create /d/camera_lrme with a
+ * "dump_register" bool knob consumed at hw_start time.
+ *
+ * On partial failure the directory is removed again. Returns 0 or -ENOMEM.
+ */
+static int cam_lrme_mgr_create_debugfs_entry(void)
+{
+	int rc = 0;
+
+	g_lrme_hw_mgr.debugfs_entry.dentry =
+		debugfs_create_dir("camera_lrme", NULL);
+	if (!g_lrme_hw_mgr.debugfs_entry.dentry) {
+		CAM_ERR(CAM_LRME, "failed to create dentry");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_bool("dump_register",
+		0644,
+		g_lrme_hw_mgr.debugfs_entry.dentry,
+		&g_lrme_hw_mgr.debugfs_entry.dump_register)) {
+		CAM_ERR(CAM_LRME, "failed to create dump register entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	return rc;
+
+err:
+	/* Tear down the directory so we don't leave a half-built tree. */
+	debugfs_remove_recursive(g_lrme_hw_mgr.debugfs_entry.dentry);
+	g_lrme_hw_mgr.debugfs_entry.dentry = NULL;
+	return rc;
+}
+
+
+/*
+ * cam_lrme_mgr_register_device() - register one LRME HW instance with the
+ * global hw mgr.
+ *
+ * Copies the hw interface, initializes the pending lists and submit
+ * worker, registers the manager callback with the HW layer, queries the
+ * device caps, and publishes them in g_lrme_hw_mgr.lrme_caps.
+ *
+ * Returns 0 on success or a negative errno (worker is destroyed on any
+ * failure after its creation).
+ */
+int cam_lrme_mgr_register_device(
+	struct cam_hw_intf *lrme_hw_intf,
+	struct cam_iommu_handle *device_iommu,
+	struct cam_iommu_handle *cdm_iommu)
+{
+	struct cam_lrme_device *hw_device;
+	char buf[128];
+	int i, rc;
+
+	hw_device = &g_lrme_hw_mgr.hw_device[lrme_hw_intf->hw_idx];
+
+	g_lrme_hw_mgr.device_iommu = *device_iommu;
+	g_lrme_hw_mgr.cdm_iommu = *cdm_iommu;
+
+	memcpy(&hw_device->hw_intf, lrme_hw_intf, sizeof(struct cam_hw_intf));
+
+	spin_lock_init(&hw_device->high_req_lock);
+	spin_lock_init(&hw_device->normal_req_lock);
+	INIT_LIST_HEAD(&hw_device->frame_pending_list_high);
+	INIT_LIST_HEAD(&hw_device->frame_pending_list_normal);
+
+	rc = snprintf(buf, sizeof(buf), "cam_lrme_device_submit_worker%d",
+		lrme_hw_intf->hw_idx);
+	CAM_DBG(CAM_LRME, "Create submit workq for %s", buf);
+	rc = cam_req_mgr_workq_create(buf,
+		CAM_LRME_WORKQ_NUM_TASK,
+		&hw_device->work, CRM_WORKQ_USAGE_NON_IRQ,
+		0);
+	if (rc) {
+		CAM_ERR(CAM_LRME,
+			"Unable to create a worker, rc=%d", rc);
+		return rc;
+	}
+
+	/* Pre-wire each workq task to its per-task work_data slot. */
+	for (i = 0; i < CAM_LRME_WORKQ_NUM_TASK; i++)
+		hw_device->work->task.pool[i].payload =
+			&hw_device->work_data[i];
+
+	if (hw_device->hw_intf.hw_ops.process_cmd) {
+		struct cam_lrme_hw_cmd_set_cb cb_args;
+
+		cb_args.cam_lrme_hw_mgr_cb = cam_lrme_mgr_cb;
+		cb_args.data = hw_device;
+
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_REGISTER_CB,
+			&cb_args, sizeof(cb_args));
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Register cb failed");
+			goto destroy_workqueue;
+		}
+		CAM_DBG(CAM_LRME, "cb registered");
+	}
+
+	if (hw_device->hw_intf.hw_ops.get_hw_caps) {
+		rc = hw_device->hw_intf.hw_ops.get_hw_caps(
+			hw_device->hw_intf.hw_priv, &hw_device->hw_caps,
+			sizeof(hw_device->hw_caps));
+		if (rc)
+			CAM_ERR(CAM_LRME, "Get caps failed");
+	} else {
+		CAM_ERR(CAM_LRME, "No get_hw_caps function");
+		/*
+		 * rc was 0 from the previous successful call, so the
+		 * original code returned success here despite bailing out.
+		 */
+		rc = -EINVAL;
+		goto destroy_workqueue;
+	}
+	g_lrme_hw_mgr.lrme_caps.dev_caps[lrme_hw_intf->hw_idx] =
+		hw_device->hw_caps;
+	g_lrme_hw_mgr.device_count++;
+	g_lrme_hw_mgr.lrme_caps.device_iommu = g_lrme_hw_mgr.device_iommu;
+	g_lrme_hw_mgr.lrme_caps.cdm_iommu = g_lrme_hw_mgr.cdm_iommu;
+	g_lrme_hw_mgr.lrme_caps.num_devices = g_lrme_hw_mgr.device_count;
+
+	hw_device->valid = true;
+
+	CAM_DBG(CAM_LRME, "device registration done");
+	return 0;
+
+destroy_workqueue:
+	cam_req_mgr_workq_destroy(&hw_device->work);
+
+	return rc;
+}
+
+/*
+ * cam_lrme_mgr_deregister_device() - tear down one registered device:
+ * destroy its worker, scrub its entry, and drop the device count.
+ */
+int cam_lrme_mgr_deregister_device(int device_index)
+{
+	struct cam_lrme_device *hw_device;
+
+	hw_device = &g_lrme_hw_mgr.hw_device[device_index];
+	cam_req_mgr_workq_destroy(&hw_device->work);
+	memset(hw_device, 0x0, sizeof(struct cam_lrme_device));
+	g_lrme_hw_mgr.device_count--;
+
+	return 0;
+}
+
+/* Destroy the hw mgr mutex and reset all global manager state. */
+int cam_lrme_hw_mgr_deinit(void)
+{
+	mutex_destroy(&g_lrme_hw_mgr.hw_mgr_mutex);
+	memset(&g_lrme_hw_mgr, 0x0, sizeof(g_lrme_hw_mgr));
+
+	return 0;
+}
+
+/*
+ * cam_lrme_hw_mgr_init() - initialize the global LRME hw mgr and export
+ * its ops through @hw_mgr_intf.
+ *
+ * Seeds the free frame-request pool, wires up the hw mgr op table,
+ * records the buf-done callback, and creates the debugfs entries
+ * (debugfs failure is non-fatal). Returns 0 or -EINVAL.
+ */
+int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
+	cam_hw_event_cb_func cam_lrme_dev_buf_done_cb)
+{
+	int i, rc = 0;
+	struct cam_lrme_frame_request *frame_req;
+
+	if (!hw_mgr_intf)
+		return -EINVAL;
+
+	CAM_DBG(CAM_LRME, "device count %d", g_lrme_hw_mgr.device_count);
+	if (g_lrme_hw_mgr.device_count > CAM_LRME_HW_MAX) {
+		CAM_ERR(CAM_LRME, "Invalid count of devices");
+		return -EINVAL;
+	}
+
+	memset(hw_mgr_intf, 0, sizeof(*hw_mgr_intf));
+
+	mutex_init(&g_lrme_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_lrme_hw_mgr.free_req_lock);
+	INIT_LIST_HEAD(&g_lrme_hw_mgr.frame_free_list);
+
+	/* Init hw mgr frame requests and add to free list */
+	for (i = 0; i < CAM_CTX_REQ_MAX * CAM_CTX_MAX; i++) {
+		frame_req = &g_lrme_hw_mgr.frame_req[i];
+
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		INIT_LIST_HEAD(&frame_req->frame_list);
+
+		list_add_tail(&frame_req->frame_list,
+			&g_lrme_hw_mgr.frame_free_list);
+	}
+
+	hw_mgr_intf->hw_mgr_priv = &g_lrme_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_lrme_mgr_get_caps;
+	hw_mgr_intf->hw_acquire = cam_lrme_mgr_hw_acquire;
+	hw_mgr_intf->hw_release = cam_lrme_mgr_hw_release;
+	hw_mgr_intf->hw_start = cam_lrme_mgr_hw_start;
+	hw_mgr_intf->hw_stop = cam_lrme_mgr_hw_stop;
+	hw_mgr_intf->hw_prepare_update = cam_lrme_mgr_hw_prepare_update;
+	hw_mgr_intf->hw_config = cam_lrme_mgr_hw_config;
+	hw_mgr_intf->hw_read = NULL;
+	hw_mgr_intf->hw_write = NULL;
+	hw_mgr_intf->hw_close = NULL;
+	hw_mgr_intf->hw_flush = cam_lrme_mgr_hw_flush;
+
+	g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb;
+
+	/* Best-effort: debugfs failure does not block driver init. */
+	cam_lrme_mgr_create_debugfs_entry();
+
+	CAM_DBG(CAM_LRME, "Hw mgr init done");
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
new file mode 100644
index 000000000000..87419cf59e00
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_MGR_H_
+#define _CAM_LRME_HW_MGR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <media/cam_lrme.h>
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_context.h"
+
+#define CAM_LRME_HW_MAX 1
+#define CAM_LRME_WORKQ_NUM_TASK 10
+
+#define CAM_LRME_DECODE_DEVICE_INDEX(ctxt_to_hw_map) \
+ ((uintptr_t)ctxt_to_hw_map & 0xF)
+
+#define CAM_LRME_DECODE_PRIORITY(ctxt_to_hw_map) \
+ (((uintptr_t)ctxt_to_hw_map & 0xF0) >> 4)
+
+#define CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map) \
+ ((uint64_t)(uintptr_t)ctxt_to_hw_map >> CAM_LRME_CTX_INDEX_SHIFT)
+
+/**
+ * enum cam_lrme_hw_mgr_ctx_priority
+ *
+ * CAM_LRME_PRIORITY_HIGH : High priority client
+ * CAM_LRME_PRIORITY_NORMAL : Normal priority client
+ */
+enum cam_lrme_hw_mgr_ctx_priority {
+ CAM_LRME_PRIORITY_HIGH,
+ CAM_LRME_PRIORITY_NORMAL,
+};
+
+/**
+ * struct cam_lrme_mgr_work_data : HW Mgr work data
+ *
+ * @hw_device : Pointer to the hw device
+ */
+struct cam_lrme_mgr_work_data {
+ struct cam_lrme_device *hw_device;
+};
+
+/**
+ * struct cam_lrme_debugfs_entry : debugfs entry struct
+ *
+ * @dentry : entry of debugfs
+ * @dump_register : flag to dump registers
+ */
+struct cam_lrme_debugfs_entry {
+ struct dentry *dentry;
+ bool dump_register;
+};
+
+/**
+ * struct cam_lrme_device : LRME HW device
+ *
+ * @hw_caps : HW device's capabilities
+ * @hw_intf : HW device's interface information
+ * @num_context : Number of contexts using this device
+ * @valid : Whether this device is valid
+ * @work : HW device's work queue
+ * @work_data : HW device's work data
+ * @frame_pending_list_high : High priority request queue
+ * @frame_pending_list_normal : Normal priority request queue
+ * @high_req_lock : Spinlock of high priority queue
+ * @normal_req_lock : Spinlock of normal priority queue
+ */
+struct cam_lrme_device {
+ struct cam_lrme_dev_cap hw_caps;
+ struct cam_hw_intf hw_intf;
+ uint32_t num_context;
+ bool valid;
+ struct cam_req_mgr_core_workq *work;
+ struct cam_lrme_mgr_work_data work_data[CAM_LRME_WORKQ_NUM_TASK];
+ struct list_head frame_pending_list_high;
+ struct list_head frame_pending_list_normal;
+ spinlock_t high_req_lock;
+ spinlock_t normal_req_lock;
+};
+
+/**
+ * struct cam_lrme_hw_mgr : LRME HW manager
+ *
+ * @device_count : Number of HW devices
+ * @frame_free_list : List of free frame requests
+ * @hw_mgr_mutex : Mutex to protect HW manager data
+ * @free_req_lock : Spinlock to protect frame_free_list
+ * @hw_device : Array of HW devices (at most CAM_LRME_HW_MAX)
+ * @device_iommu : Device iommu
+ * @cdm_iommu : cdm iommu
+ * @frame_req : Static pool of frame requests
+ * (CAM_CTX_REQ_MAX * CAM_CTX_MAX entries)
+ * @lrme_caps : LRME capabilities
+ * @event_cb : IRQ callback function
+ * @debugfs_entry : debugfs entry to set debug prop
+ */
+struct cam_lrme_hw_mgr {
+ uint32_t device_count;
+ struct list_head frame_free_list;
+ struct mutex hw_mgr_mutex;
+ spinlock_t free_req_lock;
+ struct cam_lrme_device hw_device[CAM_LRME_HW_MAX];
+ struct cam_iommu_handle device_iommu;
+ struct cam_iommu_handle cdm_iommu;
+ struct cam_lrme_frame_request frame_req[CAM_CTX_REQ_MAX * CAM_CTX_MAX];
+ struct cam_lrme_query_cap_cmd lrme_caps;
+ cam_hw_event_cb_func event_cb;
+ struct cam_lrme_debugfs_entry debugfs_entry;
+};
+
+/* Register one LRME HW device and its iommu handles with the manager */
+int cam_lrme_mgr_register_device(struct cam_hw_intf *lrme_hw_intf,
+ struct cam_iommu_handle *device_iommu,
+ struct cam_iommu_handle *cdm_iommu);
+/* Remove the device at @device_index from the manager */
+int cam_lrme_mgr_deregister_device(int device_index);
+
+#endif /* _CAM_LRME_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
new file mode 100644
index 000000000000..76a94e0d4877
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_MGR_INTF_H_
+#define _CAM_LRME_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+
+/* Initialize the global LRME hw manager; fills @hw_mgr_intf with its ops */
+int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
+ cam_hw_event_cb_func cam_lrme_dev_buf_done_cb);
+/* Tear down the global LRME hw manager state */
+int cam_lrme_hw_mgr_deinit(void);
+
+#endif /* _CAM_LRME_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile
new file mode 100644
index 000000000000..e6d53487a9dd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw
+# NOTE(review): 'camera0' below looks like a mangled include path -- every
+# sibling entry points under camera_v3; confirm the intended directory.
+ccflags-y += -Idrivers/media/platform/msm/camera0
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_hw_dev.o cam_lrme_hw_core.o cam_lrme_hw_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
new file mode 100644
index 000000000000..8cb1c9c28e7a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -0,0 +1,1273 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_smmu_api.h"
+
+/*
+ * cam_lrme_dump_registers - dump LRME register ranges for debugging
+ * @base: ioremapped base of the LRME register block
+ *
+ * Offsets/counts are hard-coded ranges of the CLC, FE/WE bus and
+ * SW/interrupt register regions.
+ */
+static void cam_lrme_dump_registers(void __iomem *base)
+{
+ /* dump the clc registers */
+ cam_io_dump(base, 0x60, (0xc0 - 0x60) / 0x4);
+ /* dump the fe and we registers */
+ cam_io_dump(base, 0x200, (0x29c - 0x200) / 0x4);
+ cam_io_dump(base, 0x2f0, (0x330 - 0x2f0) / 0x4);
+ cam_io_dump(base, 0x500, (0x5b4 - 0x500) / 0x4);
+ cam_io_dump(base, 0x700, (0x778 - 0x700) / 0x4);
+ cam_io_dump(base, 0x800, (0x878 - 0x800) / 0x4);
+ /* dump lrme sw registers, interrupts */
+ cam_io_dump(base, 0x900, (0x928 - 0x900) / 0x4);
+}
+
+/*
+ * cam_lrme_cdm_write_reg_val_pair - append one (offset, value) pair to a
+ * CDM reg-random buffer, advancing *index by 2.
+ *
+ * NOTE: no bounds check here; callers size @buffer with
+ * CAM_LRME_MAX_REG_PAIR_NUM and must not exceed it.
+ */
+static void cam_lrme_cdm_write_reg_val_pair(uint32_t *buffer,
+ uint32_t *index, uint32_t reg_offset, uint32_t reg_value)
+{
+ buffer[(*index)++] = reg_offset;
+ buffer[(*index)++] = reg_value;
+}
+
+/*
+ * cam_lrme_hw_util_fill_fe_reg - program one FE (bus read) client
+ * @io_buf: input buffer (plane 0 is used)
+ * @index: bus read client index (0 = TAR, 1 = REF at the call sites)
+ * @reg_val_pair: reg-random pair buffer appended to via *num_cmd
+ * @num_cmd: running pair-buffer index
+ * @hw_info: register layout for this LRME hw
+ *
+ * Emits buffer size, image address, stride, client enable and the
+ * format-dependent unpack configuration. An unsupported format only
+ * logs an error; no unpack_cfg pair is written in that case.
+ */
+static void cam_lrme_hw_util_fill_fe_reg(struct cam_lrme_hw_io_buffer *io_buf,
+ uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd,
+ struct cam_lrme_hw_info *hw_info)
+{
+ uint32_t reg_val;
+
+ /* 1. config buffer size */
+ reg_val = io_buf->io_cfg->planes[0].width;
+ reg_val |= (io_buf->io_cfg->planes[0].height << 16);
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].rd_buffer_size,
+ reg_val);
+
+ CAM_DBG(CAM_LRME,
+ "width %d", io_buf->io_cfg->planes[0].width);
+ CAM_DBG(CAM_LRME,
+ "height %d", io_buf->io_cfg->planes[0].height);
+
+ /* 2. config image address */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].addr_image,
+ io_buf->io_addr[0]);
+
+ CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]);
+
+ /* 3. config stride */
+ reg_val = io_buf->io_cfg->planes[0].plane_stride;
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].rd_stride,
+ reg_val);
+
+ CAM_DBG(CAM_LRME, "plane_stride %d",
+ io_buf->io_cfg->planes[0].plane_stride);
+
+ /* 4. enable client */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].core_cfg, 0x1);
+
+ /* 5. unpack_cfg */
+ if (io_buf->io_cfg->format == CAM_FORMAT_PD10)
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+ 0x0);
+ else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY ||
+ io_buf->io_cfg->format == CAM_FORMAT_PLAIN8 ||
+ io_buf->io_cfg->format == CAM_FORMAT_NV12 ||
+ io_buf->io_cfg->format == CAM_FORMAT_NV21)
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+ 0x1);
+ else if (io_buf->io_cfg->format == CAM_FORMAT_PLAIN16_10)
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+ 0x22);
+ else
+ CAM_ERR(CAM_LRME, "Unsupported format %d",
+ io_buf->io_cfg->format);
+}
+
+/*
+ * cam_lrme_hw_util_fill_we_reg - program one WE (bus write) client
+ * @io_buf: output buffer (plane 0 is used)
+ * @index: bus write client index (0 = DS2, 1 = RES at the call sites)
+ * @reg_val_pair: reg-random pair buffer appended to via *num_cmd
+ * @num_cmd: running pair-buffer index
+ * @hw_info: register layout for this LRME hw
+ *
+ * Emits client mode, image address, width/height, packer config
+ * (client-index dependent) and stride.
+ */
+static void cam_lrme_hw_util_fill_we_reg(struct cam_lrme_hw_io_buffer *io_buf,
+ uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd,
+ struct cam_lrme_hw_info *hw_info)
+{
+ /* config client mode */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].cfg,
+ 0x1);
+
+ /* image address */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].addr_image,
+ io_buf->io_addr[0]);
+ CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]);
+
+ /* buffer width and height */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].buffer_width_cfg,
+ io_buf->io_cfg->planes[0].width);
+ CAM_DBG(CAM_LRME, "width %d", io_buf->io_cfg->planes[0].width);
+
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].buffer_height_cfg,
+ io_buf->io_cfg->planes[0].height);
+ CAM_DBG(CAM_LRME, "height %d", io_buf->io_cfg->planes[0].height);
+
+ /* packer cfg: value differs per client index */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].packer_cfg,
+ (index == 0) ? 0x1 : 0x5);
+
+ /* client stride */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].wr_stride,
+ io_buf->io_cfg->planes[0].plane_stride);
+ CAM_DBG(CAM_LRME, "plane_stride %d",
+ io_buf->io_cfg->planes[0].plane_stride);
+}
+
+
+/*
+ * cam_lrme_hw_util_process_config_hw - build the CDM command buffer for
+ * one frame configuration
+ * @lrme_hw: LRME hw device
+ * @config_args: in: cmd_buf_addr/size and IO buffers; out: config_buf_size
+ *
+ * Programs FE (read) client registers for each valid input buffer and WE
+ * (write) client registers for each valid output buffer, disables unused
+ * clients, then emits a CDM changebase followed by one reg-random command
+ * containing all collected register/value pairs.
+ *
+ * Returns 0 on success, -EINVAL on invalid args/resource types or if the
+ * changebase does not fit, -ENOMEM if the reg-random command does not fit.
+ */
+static int cam_lrme_hw_util_process_config_hw(struct cam_hw_info *lrme_hw,
+ struct cam_lrme_hw_cmd_config_args *config_args)
+{
+ int i;
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_cdm_info *hw_cdm_info;
+ uint32_t *cmd_buf_addr = config_args->cmd_buf_addr;
+ uint32_t reg_val_pair[CAM_LRME_MAX_REG_PAIR_NUM];
+ struct cam_lrme_hw_io_buffer *io_buf;
+ struct cam_lrme_hw_info *hw_info =
+ ((struct cam_lrme_core *)lrme_hw->core_info)->hw_info;
+ uint32_t num_cmd = 0;
+ uint32_t size;
+ uint32_t mem_base, available_size = config_args->size;
+ uint32_t output_res_mask = 0, input_res_mask = 0;
+
+
+ if (!cmd_buf_addr) {
+ CAM_ERR(CAM_LRME, "Invalid input args");
+ return -EINVAL;
+ }
+
+ hw_cdm_info =
+ ((struct cam_lrme_core *)lrme_hw->core_info)->hw_cdm_info;
+
+ /* Program FE (read) clients for each valid input buffer */
+ for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) {
+ io_buf = &config_args->input_buf[i];
+
+ if (io_buf->valid == false)
+ break;
+
+ if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
+ CAM_ERR(CAM_LRME, "Incorrect direction %d %d",
+ io_buf->io_cfg->direction, CAM_BUF_INPUT);
+ return -EINVAL;
+ }
+ CAM_DBG(CAM_LRME,
+ "resource_type %d", io_buf->io_cfg->resource_type);
+
+ switch (io_buf->io_cfg->resource_type) {
+ case CAM_LRME_IO_TYPE_TAR:
+ cam_lrme_hw_util_fill_fe_reg(io_buf, 0, reg_val_pair,
+ &num_cmd, hw_info);
+
+ input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_TAR;
+ break;
+ case CAM_LRME_IO_TYPE_REF:
+ cam_lrme_hw_util_fill_fe_reg(io_buf, 1, reg_val_pair,
+ &num_cmd, hw_info);
+
+ input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_REF;
+ break;
+ default:
+ CAM_ERR(CAM_LRME, "wrong resource_type %d",
+ io_buf->io_cfg->resource_type);
+ return -EINVAL;
+ }
+ }
+
+ /* Disable the read clients that were not configured above */
+ for (i = 0; i < CAM_LRME_BUS_RD_MAX_CLIENTS; i++)
+ if (!((input_res_mask >> i) & 0x1))
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[i].core_cfg,
+ 0x0);
+
+ /* Program WE (write) clients for each valid output buffer */
+ for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) {
+ io_buf = &config_args->output_buf[i];
+
+ if (io_buf->valid == false)
+ break;
+
+ if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
+ /* Fix: log the expected OUTPUT direction, not INPUT */
+ CAM_ERR(CAM_LRME, "Incorrect direction %d %d",
+ io_buf->io_cfg->direction, CAM_BUF_OUTPUT);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_LRME, "resource_type %d",
+ io_buf->io_cfg->resource_type);
+ switch (io_buf->io_cfg->resource_type) {
+ case CAM_LRME_IO_TYPE_DS2:
+ cam_lrme_hw_util_fill_we_reg(io_buf, 0, reg_val_pair,
+ &num_cmd, hw_info);
+
+ output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_DS2;
+ break;
+ case CAM_LRME_IO_TYPE_RES:
+ cam_lrme_hw_util_fill_we_reg(io_buf, 1, reg_val_pair,
+ &num_cmd, hw_info);
+
+ output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_RES;
+ break;
+
+ default:
+ CAM_ERR(CAM_LRME, "wrong resource_type %d",
+ io_buf->io_cfg->resource_type);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Disable unused write clients. Fix: iterate the WR client count;
+ * the RD constant was used here by copy-paste.
+ */
+ for (i = 0; i < CAM_LRME_BUS_WR_MAX_CLIENTS; i++)
+ if (!((output_res_mask >> i) & 0x1))
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[i].cfg, 0x0);
+
+ if (output_res_mask) {
+ /* write composite mask */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+ hw_info->bus_wr_reg.common_reg.composite_mask_0,
+ output_res_mask);
+ }
+
+ /* Emit CDM changebase; sizes are in dwords, available_size in bytes */
+ size = hw_cdm_info->cdm_ops->cdm_required_size_changebase();
+ if ((size * 4) > available_size) {
+ CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d",
+ available_size, size);
+ return -EINVAL;
+ }
+
+ mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info, CAM_LRME_BASE_IDX);
+
+ hw_cdm_info->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
+ cmd_buf_addr += size;
+ available_size -= (size * 4);
+
+ /* Emit one reg-random command with all collected pairs */
+ size = hw_cdm_info->cdm_ops->cdm_required_size_reg_random(
+ num_cmd / 2);
+
+ if ((size * 4) > available_size) {
+ CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d",
+ available_size, size);
+ return -ENOMEM;
+ }
+
+ hw_cdm_info->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmd / 2,
+ reg_val_pair);
+ cmd_buf_addr += size;
+ available_size -= (size * 4);
+
+ config_args->config_buf_size =
+ config_args->size - available_size;
+
+ return 0;
+}
+
+/*
+ * cam_lrme_hw_util_submit_go - kick processing of the submitted request
+ *
+ * Writes 0x1 to the bus-read common cmd register to start the HW on the
+ * currently configured frame. Always returns 0.
+ */
+static int cam_lrme_hw_util_submit_go(struct cam_hw_info *lrme_hw)
+{
+ struct cam_lrme_core *lrme_core;
+ struct cam_hw_soc_info *soc_info;
+ struct cam_lrme_hw_info *hw_info;
+
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+ hw_info = lrme_core->hw_info;
+ soc_info = &lrme_hw->soc_info;
+
+ cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+ hw_info->bus_rd_reg.common_reg.cmd);
+
+ return 0;
+}
+
+/*
+ * cam_lrme_hw_util_reset - issue a HW or SW reset and wait for completion
+ * @lrme_hw: LRME hw device
+ * @reset_type: CAM_LRME_HW_RESET_TYPE_HW_RESET or ..._SW_RESET
+ *
+ * The reset-done IRQ signals lrme_core->reset_complete. Returns 0 on
+ * success, -ETIMEDOUT if the completion does not arrive within
+ * CAM_LRME_HW_RESET_TIMEOUT ms.
+ *
+ * NOTE(review): an unrecognized @reset_type falls through the switch and
+ * returns 0 without doing anything -- consider returning -EINVAL.
+ */
+static int cam_lrme_hw_util_reset(struct cam_hw_info *lrme_hw,
+ uint32_t reset_type)
+{
+ struct cam_lrme_core *lrme_core;
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_hw_info *hw_info;
+ long time_left;
+
+ lrme_core = lrme_hw->core_info;
+ hw_info = lrme_core->hw_info;
+
+ switch (reset_type) {
+ case CAM_LRME_HW_RESET_TYPE_HW_RESET:
+ reinit_completion(&lrme_core->reset_complete);
+ cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_rst_cmd);
+ time_left = wait_for_completion_timeout(
+ &lrme_core->reset_complete,
+ msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT));
+ if (time_left <= 0) {
+ CAM_ERR(CAM_LRME,
+ "HW reset wait failed time_left=%ld",
+ time_left);
+ return -ETIMEDOUT;
+ }
+ break;
+ case CAM_LRME_HW_RESET_TYPE_SW_RESET:
+ /* SW reset also resets both bus halves before the top reset */
+ cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.sw_reset);
+ cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
+ hw_info->bus_rd_reg.common_reg.sw_reset);
+ reinit_completion(&lrme_core->reset_complete);
+ cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_rst_cmd);
+ time_left = wait_for_completion_timeout(
+ &lrme_core->reset_complete,
+ msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT));
+ if (time_left <= 0) {
+ CAM_ERR(CAM_LRME,
+ "SW reset wait failed time_left=%ld",
+ time_left);
+ return -ETIMEDOUT;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * cam_lrme_hw_util_get_caps - read HW version registers into @hw_caps
+ * @lrme_hw: LRME hw device (must be powered on for register reads)
+ * @hw_caps: filled with gen/rev/step of CLC, bus RD/WR, top and titan
+ *
+ * Each version register encodes gen in [31:28], rev in [27:16] and
+ * step in [15:0]. Returns 0 on success, -EINVAL if hw_info is NULL.
+ */
+int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw,
+ struct cam_lrme_dev_cap *hw_caps)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_hw_info *hw_info =
+ ((struct cam_lrme_core *)lrme_hw->core_info)->hw_info;
+ uint32_t reg_value;
+
+ if (!hw_info) {
+ CAM_ERR(CAM_LRME, "Invalid hw info data");
+ return -EINVAL;
+ }
+
+ reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ hw_info->clc_reg.clc_hw_version);
+ hw_caps->clc_hw_version.gen =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+ hw_caps->clc_hw_version.rev =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->clc_hw_version.step =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ hw_info->bus_rd_reg.common_reg.hw_version);
+ hw_caps->bus_rd_hw_version.gen =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+ hw_caps->bus_rd_hw_version.rev =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->bus_rd_hw_version.step =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.hw_version);
+ hw_caps->bus_wr_hw_version.gen =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+ hw_caps->bus_wr_hw_version.rev =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->bus_wr_hw_version.step =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_hw_version);
+ hw_caps->top_hw_version.gen =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+ hw_caps->top_hw_version.rev =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->top_hw_version.step =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_titan_version);
+ hw_caps->top_titan_version.gen =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+ hw_caps->top_titan_version.rev =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->top_titan_version.step =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ return 0;
+}
+
+/*
+ * cam_lrme_hw_util_submit_req - submit one frame's command buffers to CDM
+ * @lrme_core: LRME core holding the CDM handle/ops
+ * @frame_req: frame whose hw_update_entries describe the BL commands
+ *
+ * Copies each hw update entry into the CDM BL request and submits it.
+ * Returns 0 on success, -EINVAL if there are no entries or the CDM
+ * submission fails.
+ */
+static int cam_lrme_hw_util_submit_req(struct cam_lrme_core *lrme_core,
+ struct cam_lrme_frame_request *frame_req)
+{
+ struct cam_lrme_cdm_info *hw_cdm_info =
+ lrme_core->hw_cdm_info;
+ struct cam_cdm_bl_request *cdm_cmd = hw_cdm_info->cdm_cmd;
+ struct cam_hw_update_entry *cmd;
+ int i, rc = 0;
+
+ if (frame_req->num_hw_update_entries > 0) {
+ cdm_cmd->cmd_arrary_count = frame_req->num_hw_update_entries;
+ cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+ cdm_cmd->flag = false;
+ cdm_cmd->userdata = NULL;
+ cdm_cmd->cookie = 0;
+
+ /*
+ * Fix: loop bound was '<=', which copied one entry past the
+ * end of hw_update_entries (out-of-bounds read and a bogus
+ * extra BL command).
+ */
+ for (i = 0; i < frame_req->num_hw_update_entries; i++) {
+ cmd = (frame_req->hw_update_entries + i);
+ cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+ cdm_cmd->cmd[i].offset = cmd->offset;
+ cdm_cmd->cmd[i].len = cmd->len;
+ }
+
+ rc = cam_cdm_submit_bls(hw_cdm_info->cdm_handle, cdm_cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to submit cdm commands");
+ return -EINVAL;
+ }
+ } else {
+ CAM_ERR(CAM_LRME, "No hw update entry");
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * cam_lrme_hw_util_flush_ctx - flush requests belonging to one context
+ * @lrme_hw: LRME hw device
+ * @ctxt_to_hw_map: context cookie whose requests must be dropped
+ *
+ * HW-resets the device, then for each in-flight request (submitted and
+ * processing): if it belongs to @ctxt_to_hw_map it is returned to the
+ * hw mgr via CAM_LRME_CB_PUT_FRAME; otherwise it is resubmitted and the
+ * core moves back to REQ_PENDING. Returns 0 or a negative error from
+ * reset/submit.
+ */
+static int cam_lrme_hw_util_flush_ctx(struct cam_hw_info *lrme_hw,
+ void *ctxt_to_hw_map)
+{
+ int rc = -ENODEV;
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ struct cam_lrme_hw_cb_args cb_args;
+ struct cam_lrme_frame_request *req_proc, *req_submit;
+ struct cam_lrme_hw_submit_args submit_args;
+
+ rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "reset failed");
+ return rc;
+ }
+
+ lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+ req_proc = lrme_core->req_proc;
+ req_submit = lrme_core->req_submit;
+ lrme_core->req_proc = NULL;
+ lrme_core->req_submit = NULL;
+
+ if (req_submit && req_submit->ctxt_to_hw_map == ctxt_to_hw_map) {
+ /* Submitted request belongs to flushed context: put it back */
+ cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+ cb_args.frame_req = req_submit;
+ if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+ lrme_core->hw_mgr_cb.data, &cb_args);
+ } else if (req_submit) {
+ /* Other context's request: resubmit after the reset */
+ submit_args.frame_req = req_submit;
+ submit_args.hw_update_entries = req_submit->hw_update_entries;
+ submit_args.num_hw_update_entries =
+ req_submit->num_hw_update_entries;
+ rc = cam_lrme_hw_util_submit_req(lrme_core, req_submit);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Submit failed");
+ lrme_core->req_submit = req_submit;
+ cam_lrme_hw_util_submit_go(lrme_hw);
+ lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+ }
+
+ if (req_proc && req_proc->ctxt_to_hw_map == ctxt_to_hw_map) {
+ cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+ cb_args.frame_req = req_proc;
+ if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+ lrme_core->hw_mgr_cb.data, &cb_args);
+ } else if (req_proc) {
+ submit_args.frame_req = req_proc;
+ submit_args.hw_update_entries = req_proc->hw_update_entries;
+ submit_args.num_hw_update_entries =
+ req_proc->num_hw_update_entries;
+ rc = cam_lrme_hw_util_submit_req(lrme_core, req_proc);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Submit failed");
+ lrme_core->req_submit = req_proc;
+ cam_lrme_hw_util_submit_go(lrme_hw);
+ lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+ }
+
+ return rc;
+}
+
+/*
+ * cam_lrme_hw_util_flush_req - flush one specific frame request
+ * @lrme_hw: LRME hw device
+ * @req_to_flush: the single request to drop
+ *
+ * Near-duplicate of cam_lrme_hw_util_flush_ctx, keyed on the request
+ * pointer instead of the context cookie: HW-resets the device, returns
+ * @req_to_flush via CAM_LRME_CB_PUT_FRAME if it was in flight, and
+ * resubmits any other in-flight request. Returns 0 or a negative error
+ * from reset/submit.
+ */
+static int cam_lrme_hw_util_flush_req(struct cam_hw_info *lrme_hw,
+ struct cam_lrme_frame_request *req_to_flush)
+{
+ int rc = -ENODEV;
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ struct cam_lrme_hw_cb_args cb_args;
+ struct cam_lrme_frame_request *req_proc, *req_submit;
+ struct cam_lrme_hw_submit_args submit_args;
+
+ rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "reset failed");
+ return rc;
+ }
+
+ lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+ req_proc = lrme_core->req_proc;
+ req_submit = lrme_core->req_submit;
+ lrme_core->req_proc = NULL;
+ lrme_core->req_submit = NULL;
+
+ if (req_submit && req_submit == req_to_flush) {
+ /* The flushed request was submitted: hand it back */
+ cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+ cb_args.frame_req = req_submit;
+ if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+ lrme_core->hw_mgr_cb.data, &cb_args);
+ } else if (req_submit) {
+ /* Unrelated request: resubmit after the reset */
+ submit_args.frame_req = req_submit;
+ submit_args.hw_update_entries = req_submit->hw_update_entries;
+ submit_args.num_hw_update_entries =
+ req_submit->num_hw_update_entries;
+ rc = cam_lrme_hw_util_submit_req(lrme_core, req_submit);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Submit failed");
+ lrme_core->req_submit = req_submit;
+ cam_lrme_hw_util_submit_go(lrme_hw);
+ lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+ }
+
+ if (req_proc && req_proc == req_to_flush) {
+ cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+ cb_args.frame_req = req_proc;
+ if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+ lrme_core->hw_mgr_cb.data, &cb_args);
+ } else if (req_proc) {
+ submit_args.frame_req = req_proc;
+ submit_args.hw_update_entries = req_proc->hw_update_entries;
+ submit_args.num_hw_update_entries =
+ req_proc->num_hw_update_entries;
+ rc = cam_lrme_hw_util_submit_req(lrme_core, req_proc);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Submit failed");
+ lrme_core->req_submit = req_proc;
+ cam_lrme_hw_util_submit_go(lrme_hw);
+ lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+ }
+
+ return rc;
+}
+
+
+/*
+ * cam_lrme_hw_util_process_err - recover from an error interrupt
+ *
+ * Dumps registers, HW-resets the device and notifies the hw mgr with
+ * CAM_LRME_CB_ERROR for both the processing and the submitted request
+ * (either may be NULL in cb_args.frame_req). Returns the reset result.
+ *
+ * NOTE(review): the callback is invoked here without the NULL check used
+ * at other call sites -- assumes hw_mgr_cb is always registered by the
+ * time errors can fire; confirm.
+ */
+static int cam_lrme_hw_util_process_err(struct cam_hw_info *lrme_hw)
+{
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ struct cam_lrme_frame_request *req_proc, *req_submit;
+ struct cam_lrme_hw_cb_args cb_args;
+ int rc;
+
+ req_proc = lrme_core->req_proc;
+ req_submit = lrme_core->req_submit;
+ cb_args.cb_type = CAM_LRME_CB_ERROR;
+
+ if ((lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) &&
+ (lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING) &&
+ (lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND)) {
+ CAM_ERR(CAM_LRME, "Get error irq in wrong state %d",
+ lrme_core->state);
+ }
+
+ cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base);
+
+ CAM_ERR_RATE_LIMIT(CAM_LRME, "Start recovery");
+ lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
+ rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed to reset");
+
+ lrme_core->req_proc = NULL;
+ lrme_core->req_submit = NULL;
+ /* Stay in RECOVERY if the reset failed */
+ if (!rc)
+ lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+ cb_args.frame_req = req_proc;
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data,
+ &cb_args);
+
+ cb_args.frame_req = req_submit;
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data,
+ &cb_args);
+
+ return rc;
+}
+
+/*
+ * cam_lrme_hw_util_process_reg_update - handle the reg-update interrupt
+ *
+ * Valid only in REQ_PENDING state: moves the core to PROCESSING and
+ * promotes req_submit to req_proc. In any other state triggers error
+ * recovery and returns -EINVAL. Sets CAM_LRME_CB_COMP_REG_UPDATE in
+ * @cb_args->cb_type for the caller's hw mgr notification.
+ */
+static int cam_lrme_hw_util_process_reg_update(
+ struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args)
+{
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ int rc = 0;
+
+ cb_args->cb_type |= CAM_LRME_CB_COMP_REG_UPDATE;
+ if (lrme_core->state == CAM_LRME_CORE_STATE_REQ_PENDING) {
+ lrme_core->state = CAM_LRME_CORE_STATE_PROCESSING;
+ } else {
+ CAM_ERR(CAM_LRME, "Reg update in wrong state %d",
+ lrme_core->state);
+ rc = cam_lrme_hw_util_process_err(lrme_hw);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed to reset");
+ return -EINVAL;
+ }
+
+ lrme_core->req_proc = lrme_core->req_submit;
+ lrme_core->req_submit = NULL;
+
+ /* Optional register dump controlled via debugfs */
+ if (lrme_core->dump_flag)
+ cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base);
+
+ return 0;
+}
+
+/*
+ * cam_lrme_hw_util_process_idle - handle the idle interrupt
+ *
+ * REQ_PROC_PEND: a request is already staged, kick it and go REQ_PENDING.
+ * PROCESSING: frame completed, go IDLE. Any other state triggers error
+ * recovery. On success sets CAM_LRME_CB_BUF_DONE and hands the finished
+ * req_proc to the caller through @cb_args->frame_req.
+ */
+static int cam_lrme_hw_util_process_idle(
+ struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args)
+{
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ int rc = 0;
+
+ cb_args->cb_type |= CAM_LRME_CB_BUF_DONE;
+ switch (lrme_core->state) {
+ case CAM_LRME_CORE_STATE_REQ_PROC_PEND:
+ cam_lrme_hw_util_submit_go(lrme_hw);
+ lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+ break;
+
+ case CAM_LRME_CORE_STATE_PROCESSING:
+ lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+ break;
+
+ default:
+ CAM_ERR(CAM_LRME, "Idle in wrong state %d",
+ lrme_core->state);
+ rc = cam_lrme_hw_util_process_err(lrme_hw);
+ return rc;
+ }
+ cb_args->frame_req = lrme_core->req_proc;
+ lrme_core->req_proc = NULL;
+
+ return 0;
+}
+
+/*
+ * cam_lrme_set_irq - mask or unmask all LRME interrupt sources
+ * @lrme_hw: LRME hw device
+ * @set: CAM_LRME_IRQ_ENABLE or CAM_LRME_IRQ_DISABLE
+ *
+ * Writes all-ones/all-zeros to the top, bus-write (masks 0 and 1) and
+ * bus-read interrupt mask registers.
+ */
+void cam_lrme_set_irq(struct cam_hw_info *lrme_hw,
+ enum cam_lrme_irq_set set)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ struct cam_lrme_hw_info *hw_info = lrme_core->hw_info;
+
+ switch (set) {
+ case CAM_LRME_IRQ_ENABLE:
+ cam_io_w_mb(0xFFFF,
+ soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_irq_mask);
+ cam_io_w_mb(0xFFFFF,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.irq_mask_0);
+ cam_io_w_mb(0xFFFFF,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.irq_mask_1);
+ cam_io_w_mb(0xFFFFF,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_rd_reg.common_reg.irq_mask);
+ break;
+
+ case CAM_LRME_IRQ_DISABLE:
+ cam_io_w_mb(0x0,
+ soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_irq_mask);
+ cam_io_w_mb(0x0,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.irq_mask_0);
+ cam_io_w_mb(0x0,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.irq_mask_1);
+ cam_io_w_mb(0x0,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_rd_reg.common_reg.irq_mask);
+ break;
+ }
+}
+
+
+/*
+ * cam_lrme_hw_process_irq - deferred (bottom-half) interrupt handler
+ * @priv: struct cam_hw_info of the LRME device
+ * @data: struct cam_lrme_hw_work_data with the latched irq statuses
+ *
+ * Under hw_mutex, dispatches in priority order: top irq bit 3 = error
+ * (recovery), WE irq0 bit 1 = reg update, top irq bit 4 = idle. On the
+ * normal path the accumulated cb_args are forwarded to the hw mgr
+ * callback. Returns 0 or a negative error code.
+ */
+int cam_lrme_hw_process_irq(void *priv, void *data)
+{
+ struct cam_lrme_hw_work_data *work_data;
+ struct cam_hw_info *lrme_hw;
+ struct cam_lrme_core *lrme_core;
+ int rc = 0;
+ uint32_t top_irq_status, fe_irq_status;
+ uint32_t *we_irq_status;
+ struct cam_lrme_hw_cb_args cb_args;
+
+ if (!data || !priv) {
+ CAM_ERR(CAM_LRME, "Invalid data %pK %pK", data, priv);
+ return -EINVAL;
+ }
+
+ memset(&cb_args, 0, sizeof(struct cam_lrme_hw_cb_args));
+ lrme_hw = (struct cam_hw_info *)priv;
+ work_data = (struct cam_lrme_hw_work_data *)data;
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+ top_irq_status = work_data->top_irq_status;
+ fe_irq_status = work_data->fe_irq_status;
+ we_irq_status = work_data->we_irq_status;
+
+ CAM_DBG(CAM_LRME,
+ "top status %x, fe status %x, we status0 %x, we status1 %x",
+ top_irq_status, fe_irq_status, we_irq_status[0],
+ we_irq_status[1]);
+ CAM_DBG(CAM_LRME, "Current state %d", lrme_core->state);
+
+ mutex_lock(&lrme_hw->hw_mutex);
+
+ /* Error takes precedence; skips the normal callback below */
+ if (top_irq_status & (1 << 3)) {
+ CAM_DBG(CAM_LRME, "Error");
+ rc = cam_lrme_hw_util_process_err(lrme_hw);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Process error failed");
+ goto end;
+ }
+
+ if (we_irq_status[0] & (1 << 1)) {
+ CAM_DBG(CAM_LRME, "reg update");
+ rc = cam_lrme_hw_util_process_reg_update(lrme_hw, &cb_args);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Process reg_update failed");
+ goto end;
+ }
+ }
+
+ if (top_irq_status & (1 << 4)) {
+ CAM_DBG(CAM_LRME, "IDLE");
+ if (!lrme_core->req_proc) {
+ CAM_DBG(CAM_LRME, "No frame request to process idle");
+ goto end;
+ }
+ rc = cam_lrme_hw_util_process_idle(lrme_hw, &cb_args);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Process idle failed");
+ goto end;
+ }
+ }
+
+ if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb) {
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+ lrme_core->hw_mgr_cb.data, &cb_args);
+ } else {
+ CAM_ERR(CAM_LRME, "No hw mgr cb");
+ rc = -EINVAL;
+ }
+
+end:
+ mutex_unlock(&lrme_hw->hw_mutex);
+ return rc;
+}
+
+/*
+ * cam_lrme_hw_start - power up and initialize the LRME device (refcounted)
+ * @hw_priv: struct cam_hw_info of the LRME device
+ * @hw_start_args: unused
+ * @arg_size: unused
+ *
+ * First open enables SOC resources, HW-resets the device and streams on
+ * the CDM; later opens only bump open_count. Returns 0 on success or a
+ * negative error code (resources stay disabled on failure).
+ */
+int cam_lrme_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
+{
+ struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+ int rc = 0;
+ struct cam_lrme_core *lrme_core;
+
+ if (!lrme_hw) {
+ CAM_ERR(CAM_LRME,
+ "Invalid input params, lrme_hw %pK",
+ lrme_hw);
+ return -EINVAL;
+ }
+
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+ mutex_lock(&lrme_hw->hw_mutex);
+
+ if (lrme_hw->open_count > 0) {
+ lrme_hw->open_count++;
+ CAM_DBG(CAM_LRME, "This device is activated before");
+ goto unlock;
+ }
+
+ rc = cam_lrme_soc_enable_resources(lrme_hw);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to enable soc resources");
+ goto unlock;
+ }
+
+ rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to reset hw");
+ goto disable_soc;
+ }
+
+ if (lrme_core->hw_cdm_info) {
+ struct cam_lrme_cdm_info *hw_cdm_info =
+ lrme_core->hw_cdm_info;
+
+ rc = cam_cdm_stream_on(hw_cdm_info->cdm_handle);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to stream on cdm");
+ goto disable_soc;
+ }
+ }
+
+ lrme_hw->hw_state = CAM_HW_STATE_POWER_UP;
+ lrme_hw->open_count++;
+ lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+ CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count);
+ mutex_unlock(&lrme_hw->hw_mutex);
+ return rc;
+
+disable_soc:
+ if (cam_lrme_soc_disable_resources(lrme_hw))
+ CAM_ERR(CAM_LRME, "Error in disable soc resources");
+unlock:
+ CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count);
+ mutex_unlock(&lrme_hw->hw_mutex);
+ return rc;
+}
+
+/**
+ * cam_lrme_hw_stop()
+ *
+ * @brief        : Power down the LRME hardware. Reference-counted via
+ *                 open_count; only the last stop streams off CDM, disables
+ *                 SOC resources and moves the core back to INIT.
+ *
+ * @hw_priv      : Pointer to cam_hw_info of this LRME device
+ * @hw_stop_args : Stop arguments (unused by this function)
+ * @arg_size     : Size of hw_stop_args (unused by this function)
+ *
+ * @return       : 0 on success, negative error code on failure
+ */
+int cam_lrme_hw_stop(void *hw_priv, void *hw_stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+	struct cam_lrme_core *lrme_core;
+
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME, "Invalid argument");
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count == 0 ||
+		lrme_hw->hw_state == CAM_HW_STATE_POWER_DOWN) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "Error Unbalanced stop");
+		return -EINVAL;
+	}
+	lrme_hw->open_count--;
+
+	CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count);
+
+	/* Other users still active; keep the hardware powered */
+	if (lrme_hw->open_count)
+		goto unlock;
+
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+
+	if (lrme_core->hw_cdm_info) {
+		struct cam_lrme_cdm_info *hw_cdm_info =
+			lrme_core->hw_cdm_info;
+
+		rc = cam_cdm_stream_off(hw_cdm_info->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_LRME,
+				"Failed in CDM StreamOff, handle=0x%x, rc=%d",
+				hw_cdm_info->cdm_handle, rc);
+			goto unlock;
+		}
+	}
+
+	rc = cam_lrme_soc_disable_resources(lrme_hw);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed in Disable SOC, rc=%d", rc);
+
+	lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	if (lrme_core->state == CAM_LRME_CORE_STATE_IDLE) {
+		lrme_core->state = CAM_LRME_CORE_STATE_INIT;
+	} else {
+		/*
+		 * Fix: fall through to the common unlock path instead of
+		 * returning directly; the original returned -EINVAL here
+		 * while still holding hw_mutex (mutex leak).
+		 */
+		CAM_ERR(CAM_LRME, "HW in wrong state %d", lrme_core->state);
+		rc = -EINVAL;
+	}
+
+unlock:
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+}
+
+/**
+ * cam_lrme_hw_submit_req()
+ *
+ * @brief          : Submit a frame request to the LRME hardware. If the
+ *                   core is IDLE the request is kicked off immediately;
+ *                   if it is PROCESSING the request is held as pending
+ *                   (at most two requests inside the hardware).
+ *
+ * @hw_priv        : Pointer to cam_hw_info of this LRME device
+ * @hw_submit_args : Pointer to cam_lrme_hw_submit_args
+ * @arg_size       : Must equal sizeof(struct cam_lrme_hw_submit_args)
+ *
+ * @return         : 0 on success, -EBUSY when the device cannot accept
+ *                   the request now, other negative error on failure
+ */
+int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_hw_submit_args *args =
+		(struct cam_lrme_hw_submit_args *)hw_submit_args;
+	int rc = 0;
+	struct cam_lrme_frame_request *frame_req;
+
+	if (!hw_priv || !hw_submit_args) {
+		CAM_ERR(CAM_LRME, "Invalid input");
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_lrme_hw_submit_args) != arg_size) {
+		CAM_ERR(CAM_LRME,
+			"size of args %zu, arg_size %d",
+			sizeof(struct cam_lrme_hw_submit_args), arg_size);
+		return -EINVAL;
+	}
+
+	frame_req = args->frame_req;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count == 0) {
+		CAM_ERR(CAM_LRME, "HW is not open");
+		mutex_unlock(&lrme_hw->hw_mutex);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	if (lrme_core->state != CAM_LRME_CORE_STATE_IDLE &&
+		lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "device busy, can not submit, state %d",
+			lrme_core->state);
+		return -EBUSY;
+	}
+
+	if (lrme_core->req_submit != NULL) {
+		/*
+		 * Fix: release hw_mutex before returning; the original
+		 * returned -EBUSY here without unlocking (mutex leak).
+		 */
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "req_submit is not NULL");
+		return -EBUSY;
+	}
+
+	rc = cam_lrme_hw_util_submit_req(lrme_core, frame_req);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Submit req failed");
+		goto error;
+	}
+
+	switch (lrme_core->state) {
+	case CAM_LRME_CORE_STATE_PROCESSING:
+		/* One request already in flight; this one stays pending */
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PROC_PEND;
+		break;
+
+	case CAM_LRME_CORE_STATE_IDLE:
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Wrong hw state");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	lrme_core->req_submit = frame_req;
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+	CAM_DBG(CAM_LRME, "Release lock, submit done for req %llu",
+		frame_req->req_id);
+
+	return 0;
+
+error:
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return rc;
+}
+
+/**
+ * cam_lrme_hw_reset()
+ *
+ * @brief           : Reset the LRME hardware core. Rejected while a
+ *                    recovery (another reset) is already in progress.
+ *
+ * @hw_priv         : Pointer to cam_hw_info of this LRME device
+ * @reset_core_args : Pointer to cam_lrme_hw_reset_args with the reset type
+ * @arg_size        : Must equal sizeof(struct cam_lrme_hw_reset_args)
+ *
+ * @return          : 0 on success, negative error code on failure
+ */
+int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = hw_priv;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_hw_reset_args *lrme_reset_args = reset_core_args;
+	int rc;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	if (!reset_core_args ||
+		sizeof(struct cam_lrme_hw_reset_args) != arg_size) {
+		CAM_ERR(CAM_LRME, "Invalid reset args");
+		return -EINVAL;
+	}
+
+	lrme_core = lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+	if (lrme_core->state == CAM_LRME_CORE_STATE_RECOVERY) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "Reset not allowed in %d state",
+			lrme_core->state);
+		return -EINVAL;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, lrme_reset_args->reset_type);
+	if (rc) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		/* Fix: log with the CAM_LRME tag (was mistakenly CAM_FD) */
+		CAM_ERR(CAM_LRME, "Failed to reset");
+		/*
+		 * NOTE(review): the core is left in RECOVERY state on reset
+		 * failure — confirm this is intended.
+		 */
+		return rc;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return 0;
+}
+
+/**
+ * cam_lrme_hw_flush()
+ *
+ * @brief         : Flush request(s) currently owned by the hardware:
+ *                  everything belonging to a context (CAM_FLUSH_TYPE_ALL)
+ *                  or a single request (CAM_FLUSH_TYPE_REQ). A no-op when
+ *                  the device holds nothing matching the flush target.
+ *
+ * @hw_priv       : Pointer to cam_hw_info of this LRME device
+ * @hw_flush_args : Pointer to cam_lrme_hw_flush_args
+ * @arg_size      : Size of hw_flush_args (unused by this function)
+ *
+ * @return        : 0 when flush is done or not needed, negative error
+ *                  code on failure (-ENODEV for an unsupported type)
+ */
+int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size)
+{
+	struct cam_lrme_core *lrme_core = NULL;
+	struct cam_hw_info *lrme_hw = hw_priv;
+	struct cam_lrme_hw_flush_args *flush_args =
+		(struct cam_lrme_hw_flush_args *)hw_flush_args;
+	int rc = -ENODEV;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_LRME, "Invalid arguments %pK", hw_priv);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	/* No request can be in flight unless the core is in one of these */
+	if (lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING &&
+		lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING &&
+		lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "Flush is not needed in %d state",
+			lrme_core->state);
+		return 0;
+	}
+
+	if (!lrme_core->req_proc && !lrme_core->req_submit) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "no req in device");
+		return 0;
+	}
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_ALL:
+		/* Flush only if either held request belongs to this ctx */
+		if ((!lrme_core->req_submit ||
+			lrme_core->req_submit->ctxt_to_hw_map !=
+			flush_args->ctxt_to_hw_map) &&
+			(!lrme_core->req_proc ||
+			lrme_core->req_proc->ctxt_to_hw_map !=
+			flush_args->ctxt_to_hw_map)) {
+			mutex_unlock(&lrme_hw->hw_mutex);
+			CAM_DBG(CAM_LRME, "hw running on different ctx");
+			return 0;
+		}
+		rc = cam_lrme_hw_util_flush_ctx(lrme_hw,
+			flush_args->ctxt_to_hw_map);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Flush all failed");
+		break;
+
+	case CAM_FLUSH_TYPE_REQ:
+		/* Flush only if the target req is actually held by the HW */
+		if ((!lrme_core->req_submit ||
+			lrme_core->req_submit != flush_args->req_to_flush) &&
+			(!lrme_core->req_proc ||
+			lrme_core->req_proc != flush_args->req_to_flush)) {
+			mutex_unlock(&lrme_hw->hw_mutex);
+			CAM_DBG(CAM_LRME, "hw running on different ctx");
+			return 0;
+		}
+		rc = cam_lrme_hw_util_flush_req(lrme_hw,
+			flush_args->req_to_flush);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Flush req failed");
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Unsupported flush type");
+		break;
+	}
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return rc;
+}
+
+/**
+ * cam_lrme_hw_get_caps()
+ *
+ * @brief           : Copy this device's cached capability info into the
+ *                    caller-supplied cam_lrme_dev_cap structure.
+ *
+ * @hw_priv         : Pointer to cam_hw_info of this LRME device
+ * @get_hw_cap_args : Pointer to cam_lrme_dev_cap to fill in
+ * @arg_size        : Size of get_hw_cap_args (unused by this function)
+ *
+ * @return          : 0 on success, -EINVAL on bad pointers
+ */
+int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *hw;
+	struct cam_lrme_core *core;
+	struct cam_lrme_dev_cap *caps_out;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_LRME, "Invalid input pointers %pK %pK",
+			hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	hw = (struct cam_hw_info *)hw_priv;
+	core = (struct cam_lrme_core *)hw->core_info;
+	caps_out = (struct cam_lrme_dev_cap *)get_hw_cap_args;
+	*caps_out = core->hw_caps;
+
+	return 0;
+}
+
+/**
+ * cam_lrme_hw_irq()
+ *
+ * @brief   : LRME interrupt handler (top half). Reads and acknowledges the
+ *            top/FE/WE irq status registers, completes a pending reset when
+ *            the reset bit is set, and queues all remaining events to the
+ *            HW workqueue for bottom-half processing.
+ *
+ * @irq_num : IRQ line number (unused)
+ * @data    : Pointer to cam_hw_info of this LRME device
+ *
+ * @return  : IRQ_HANDLED on success; IRQ_NONE on invalid data or when no
+ *            free workqueue task is available
+ */
+irqreturn_t cam_lrme_hw_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_lrme_hw_info *hw_info;
+	struct crm_workq_task *task;
+	struct cam_lrme_hw_work_data *work_data;
+	uint32_t top_irq_status, fe_irq_status, we_irq_status0, we_irq_status1;
+	int rc;
+
+	if (!data) {
+		CAM_ERR(CAM_LRME, "Invalid data in IRQ callback");
+		return IRQ_NONE;
+	}
+
+	lrme_hw = (struct cam_hw_info *)data;
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	soc_info = &lrme_hw->soc_info;
+	hw_info = lrme_core->hw_info;
+
+	/*
+	 * For each irq source: read the raw status, write it back to the
+	 * clear register, then mask it down to the bits this driver handles.
+	 */
+	top_irq_status = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_status);
+	CAM_DBG(CAM_LRME, "top_irq_status %x", top_irq_status);
+	cam_io_w_mb(top_irq_status,
+		soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_clear);
+	top_irq_status &= CAM_LRME_TOP_IRQ_MASK;
+
+	fe_irq_status = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_status);
+	CAM_DBG(CAM_LRME, "fe_irq_status %x", fe_irq_status);
+	cam_io_w_mb(fe_irq_status,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_clear);
+	fe_irq_status &= CAM_LRME_FE_IRQ_MASK;
+
+	we_irq_status0 = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_status_0);
+	CAM_DBG(CAM_LRME, "we_irq_status[0] %x", we_irq_status0);
+	cam_io_w_mb(we_irq_status0,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_clear_0);
+	we_irq_status0 &= CAM_LRME_WE_IRQ_MASK_0;
+
+	we_irq_status1 = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_status_1);
+	CAM_DBG(CAM_LRME, "we_irq_status[1] %x", we_irq_status1);
+	cam_io_w_mb(we_irq_status1,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_clear_1);
+	we_irq_status1 &= CAM_LRME_WE_IRQ_MASK_1;
+
+	/*
+	 * Write 0x1 to each irq_cmd register — presumably latches the clears
+	 * written above; confirm against the LRME hardware spec.
+	 */
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_cmd);
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_cmd);
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_cmd);
+
+	/* Bit 0 of the top status: reset done — wake reset_complete waiter */
+	if (top_irq_status & 0x1) {
+		complete(&lrme_core->reset_complete);
+		top_irq_status &= (~0x1);
+	}
+
+	/* Defer all remaining events to the workqueue bottom half */
+	if (top_irq_status || fe_irq_status ||
+		we_irq_status0 || we_irq_status1) {
+		task = cam_req_mgr_workq_get_task(lrme_core->work);
+		if (!task) {
+			CAM_ERR(CAM_LRME, "no empty task available");
+			return IRQ_NONE;
+		}
+		work_data = (struct cam_lrme_hw_work_data *)task->payload;
+		work_data->top_irq_status = top_irq_status;
+		work_data->fe_irq_status = fe_irq_status;
+		work_data->we_irq_status[0] = we_irq_status0;
+		work_data->we_irq_status[1] = we_irq_status1;
+		task->process_cb = cam_lrme_hw_process_irq;
+		rc = cam_req_mgr_workq_enqueue_task(task, data,
+			CRM_TASK_PRIORITY_0);
+		if (rc)
+			CAM_ERR(CAM_LRME,
+				"Failed in enqueue work task, rc=%d", rc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * cam_lrme_hw_process_cmd()
+ *
+ * @brief    : Dispatch an HW-manager command to this LRME device.
+ *
+ * @hw_priv  : Pointer to cam_hw_info of this LRME device
+ * @cmd_type : One of the CAM_LRME_HW_CMD_* command codes
+ * @cmd_args : Command payload; concrete type depends on cmd_type
+ * @arg_size : Size of cmd_args (forwarded for CAM_LRME_HW_CMD_SUBMIT)
+ *
+ * @return   : 0 on success (unknown commands are silently ignored),
+ *             negative error code on failure
+ */
+int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+
+	switch (cmd_type) {
+	case CAM_LRME_HW_CMD_PREPARE_HW_UPDATE: {
+		struct cam_lrme_hw_cmd_config_args *config_args;
+
+		config_args = (struct cam_lrme_hw_cmd_config_args *)cmd_args;
+		rc = cam_lrme_hw_util_process_config_hw(lrme_hw, config_args);
+		break;
+	}
+
+	case CAM_LRME_HW_CMD_REGISTER_CB: {
+		/*
+		 * Fix: removed the unused local "hw_device" (assigned but
+		 * never read) and the redundant "rc = 0" (rc is already 0).
+		 */
+		struct cam_lrme_hw_cmd_set_cb *cb_args;
+		struct cam_lrme_core *lrme_core =
+			(struct cam_lrme_core *)lrme_hw->core_info;
+
+		cb_args = (struct cam_lrme_hw_cmd_set_cb *)cmd_args;
+		lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb =
+			cb_args->cam_lrme_hw_mgr_cb;
+		lrme_core->hw_mgr_cb.data = cb_args->data;
+		break;
+	}
+
+	case CAM_LRME_HW_CMD_SUBMIT: {
+		struct cam_lrme_hw_submit_args *submit_args;
+
+		submit_args = (struct cam_lrme_hw_submit_args *)cmd_args;
+		rc = cam_lrme_hw_submit_req(hw_priv,
+			submit_args, arg_size);
+		break;
+	}
+
+	case CAM_LRME_HW_CMD_DUMP_REGISTER: {
+		struct cam_lrme_core *lrme_core =
+			(struct cam_lrme_core *)lrme_hw->core_info;
+
+		lrme_core->dump_flag = *(bool *)cmd_args;
+		CAM_DBG(CAM_LRME, "dump_flag %d", lrme_core->dump_flag);
+		break;
+	}
+
+	default:
+		/* Unknown commands are ignored, matching existing behavior */
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
new file mode 100644
index 000000000000..c0786f53c19c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
@@ -0,0 +1,458 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_CORE_H_
+#define _CAM_LRME_HW_CORE_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_defs.h>
+#include <media/cam_lrme.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_req_mgr_workq.h"
+
+#define CAM_LRME_HW_RESET_TIMEOUT 3000
+
+#define CAM_LRME_BUS_RD_MAX_CLIENTS 2
+#define CAM_LRME_BUS_WR_MAX_CLIENTS 2
+
+#define CAM_LRME_HW_WORKQ_NUM_TASK 30
+
+#define CAM_LRME_TOP_IRQ_MASK 0x19
+#define CAM_LRME_WE_IRQ_MASK_0 0x2
+#define CAM_LRME_WE_IRQ_MASK_1 0x0
+#define CAM_LRME_FE_IRQ_MASK 0x0
+
+#define CAM_LRME_MAX_REG_PAIR_NUM 60
+
+/**
+ * enum cam_lrme_irq_set
+ *
+ * @CAM_LRME_IRQ_ENABLE : Enable irqs
+ * @CAM_LRME_IRQ_DISABLE : Disable irqs
+ */
+enum cam_lrme_irq_set {
+ CAM_LRME_IRQ_ENABLE,
+ CAM_LRME_IRQ_DISABLE,
+};
+
+/**
+ * struct cam_lrme_cdm_info : information used to submit cdm command
+ *
+ * @cdm_handle : CDM handle for this device
+ * @cdm_ops : CDM ops
+ * @cdm_cmd : CDM command pointer
+ */
+struct cam_lrme_cdm_info {
+ uint32_t cdm_handle;
+ struct cam_cdm_utils_ops *cdm_ops;
+ struct cam_cdm_bl_request *cdm_cmd;
+};
+
+/**
+ * struct cam_lrme_hw_work_data : Work data for HW work queue
+ *
+ * @top_irq_status : Top registers irq status
+ * @fe_irq_status : FE engine irq status
+ * @we_irq_status : WE engine irq status
+ */
+struct cam_lrme_hw_work_data {
+ uint32_t top_irq_status;
+ uint32_t fe_irq_status;
+ uint32_t we_irq_status[2];
+};
+
+/**
+ * enum cam_lrme_core_state : LRME core states
+ *
+ * @CAM_LRME_CORE_STATE_UNINIT : LRME is in uninit state
+ * @CAM_LRME_CORE_STATE_INIT : LRME is in init state after probe
+ * @CAM_LRME_CORE_STATE_IDLE : LRME is in idle state. Hardware is in
+ * this state when no frame is processing
+ * or waiting for this core.
+ * @CAM_LRME_CORE_STATE_REQ_PENDING : LRME is in pending state. One frame is
+ * waiting for processing
+ * @CAM_LRME_CORE_STATE_PROCESSING : LRME is in processing state. HW manager
+ * can submit one more frame to HW
+ * @CAM_LRME_CORE_STATE_REQ_PROC_PEND : Indicate two frames are inside HW.
+ * @CAM_LRME_CORE_STATE_RECOVERY : Indicate core is in the process of reset
+ * @CAM_LRME_CORE_STATE_MAX : upper limit of states
+ */
+enum cam_lrme_core_state {
+	CAM_LRME_CORE_STATE_UNINIT,
+	CAM_LRME_CORE_STATE_INIT,
+	CAM_LRME_CORE_STATE_IDLE,
+	CAM_LRME_CORE_STATE_REQ_PENDING,
+	CAM_LRME_CORE_STATE_PROCESSING,
+	CAM_LRME_CORE_STATE_REQ_PROC_PEND,
+	CAM_LRME_CORE_STATE_RECOVERY,
+	CAM_LRME_CORE_STATE_MAX,
+};
+
+/**
+ * struct cam_lrme_core : LRME HW core information
+ *
+ * @hw_info         : Pointer to base HW information structure
+ * @device_iommu    : Device iommu handle
+ * @cdm_iommu       : CDM iommu handle
+ * @hw_caps         : Hardware capabilities
+ * @state           : Hardware state
+ * @reset_complete  : Reset completion
+ * @work            : Hardware workqueue to handle irq events
+ * @work_data       : Work data used by hardware workqueue
+ * @hw_mgr_cb       : Hw manager callback
+ * @req_proc        : Pointer to the processing frame request
+ * @req_submit      : Pointer to the frame request waiting for processing
+ * @hw_cdm_info     : CDM information used by this device
+ * @hw_idx          : Hardware index
+ * @dump_flag       : Debug flag set through the CAM_LRME_HW_CMD_DUMP_REGISTER
+ *                    command
+ */
+struct cam_lrme_core {
+	struct cam_lrme_hw_info   *hw_info;
+	struct cam_iommu_handle    device_iommu;
+	struct cam_iommu_handle    cdm_iommu;
+	struct cam_lrme_dev_cap    hw_caps;
+	enum cam_lrme_core_state   state;
+	struct completion          reset_complete;
+	struct cam_req_mgr_core_workq    *work;
+	struct cam_lrme_hw_work_data      work_data[CAM_LRME_HW_WORKQ_NUM_TASK];
+	struct cam_lrme_hw_cmd_set_cb     hw_mgr_cb;
+	struct cam_lrme_frame_request    *req_proc;
+	struct cam_lrme_frame_request    *req_submit;
+	struct cam_lrme_cdm_info         *hw_cdm_info;
+	uint32_t                          hw_idx;
+	bool                              dump_flag;
+};
+
+/**
+ * struct cam_lrme_bus_rd_reg_common : Offsets of FE common registers
+ *
+ * @hw_version : Offset of hw_version register
+ * @hw_capability : Offset of hw_capability register
+ * @sw_reset : Offset of sw_reset register
+ * @cgc_override : Offset of cgc_override register
+ * @irq_mask : Offset of irq_mask register
+ * @irq_clear : Offset of irq_clear register
+ * @irq_cmd : Offset of irq_cmd register
+ * @irq_status : Offset of irq_status register
+ * @cmd : Offset of cmd register
+ * @irq_set : Offset of irq_set register
+ * @misr_reset : Offset of misr_reset register
+ * @security_cfg : Offset of security_cfg register
+ * @pwr_iso_cfg : Offset of pwr_iso_cfg register
+ * @pwr_iso_seed : Offset of pwr_iso_seed register
+ * @test_bus_ctrl : Offset of test_bus_ctrl register
+ * @spare : Offset of spare register
+ */
+struct cam_lrme_bus_rd_reg_common {
+ uint32_t hw_version;
+ uint32_t hw_capability;
+ uint32_t sw_reset;
+ uint32_t cgc_override;
+ uint32_t irq_mask;
+ uint32_t irq_clear;
+ uint32_t irq_cmd;
+ uint32_t irq_status;
+ uint32_t cmd;
+ uint32_t irq_set;
+ uint32_t misr_reset;
+ uint32_t security_cfg;
+ uint32_t pwr_iso_cfg;
+ uint32_t pwr_iso_seed;
+ uint32_t test_bus_ctrl;
+ uint32_t spare;
+};
+
+/**
+ * struct cam_lrme_bus_wr_reg_common : Offset of WE common registers
+ * @hw_version : Offset of hw_version register
+ * @hw_capability : Offset of hw_capability register
+ * @sw_reset : Offset of sw_reset register
+ * @cgc_override : Offset of cgc_override register
+ * @misr_reset : Offset of misr_reset register
+ * @pwr_iso_cfg : Offset of pwr_iso_cfg register
+ * @test_bus_ctrl : Offset of test_bus_ctrl register
+ * @composite_mask_0 : Offset of composite_mask_0 register
+ * @irq_mask_0 : Offset of irq_mask_0 register
+ * @irq_mask_1 : Offset of irq_mask_1 register
+ * @irq_clear_0 : Offset of irq_clear_0 register
+ * @irq_clear_1 : Offset of irq_clear_1 register
+ * @irq_status_0 : Offset of irq_status_0 register
+ * @irq_status_1 : Offset of irq_status_1 register
+ * @irq_cmd : Offset of irq_cmd register
+ * @irq_set_0 : Offset of irq_set_0 register
+ * @irq_set_1 : Offset of irq_set_1 register
+ * @addr_fifo_status : Offset of addr_fifo_status register
+ * @frame_header_cfg0 : Offset of frame_header_cfg0 register
+ * @frame_header_cfg1 : Offset of frame_header_cfg1 register
+ * @spare : Offset of spare register
+ */
+struct cam_lrme_bus_wr_reg_common {
+ uint32_t hw_version;
+ uint32_t hw_capability;
+ uint32_t sw_reset;
+ uint32_t cgc_override;
+ uint32_t misr_reset;
+ uint32_t pwr_iso_cfg;
+ uint32_t test_bus_ctrl;
+ uint32_t composite_mask_0;
+ uint32_t irq_mask_0;
+ uint32_t irq_mask_1;
+ uint32_t irq_clear_0;
+ uint32_t irq_clear_1;
+ uint32_t irq_status_0;
+ uint32_t irq_status_1;
+ uint32_t irq_cmd;
+ uint32_t irq_set_0;
+ uint32_t irq_set_1;
+ uint32_t addr_fifo_status;
+ uint32_t frame_header_cfg0;
+ uint32_t frame_header_cfg1;
+ uint32_t spare;
+};
+
+/**
+ * struct cam_lrme_bus_rd_bus_client : Offset of FE registers
+ *
+ * @core_cfg : Offset of core_cfg register
+ * @ccif_meta_data : Offset of ccif_meta_data register
+ * @addr_image : Offset of addr_image register
+ * @rd_buffer_size : Offset of rd_buffer_size register
+ * @rd_stride : Offset of rd_stride register
+ * @unpack_cfg_0 : Offset of unpack_cfg_0 register
+ * @latency_buff_allocation : Offset of latency_buff_allocation register
+ * @burst_limit_cfg : Offset of burst_limit_cfg register
+ * @misr_cfg_0 : Offset of misr_cfg_0 register
+ * @misr_cfg_1 : Offset of misr_cfg_1 register
+ * @misr_rd_val : Offset of misr_rd_val register
+ * @debug_status_cfg : Offset of debug_status_cfg register
+ * @debug_status_0 : Offset of debug_status_0 register
+ * @debug_status_1 : Offset of debug_status_1 register
+ */
+struct cam_lrme_bus_rd_bus_client {
+ uint32_t core_cfg;
+ uint32_t ccif_meta_data;
+ uint32_t addr_image;
+ uint32_t rd_buffer_size;
+ uint32_t rd_stride;
+ uint32_t unpack_cfg_0;
+ uint32_t latency_buff_allocation;
+ uint32_t burst_limit_cfg;
+ uint32_t misr_cfg_0;
+ uint32_t misr_cfg_1;
+ uint32_t misr_rd_val;
+ uint32_t debug_status_cfg;
+ uint32_t debug_status_0;
+ uint32_t debug_status_1;
+};
+
+/**
+ * struct cam_lrme_bus_wr_bus_client : Offset of WE registers
+ *
+ * @status_0 : Offset of status_0 register
+ * @status_1 : Offset of status_1 register
+ * @cfg : Offset of cfg register
+ * @addr_frame_header : Offset of addr_frame_header register
+ * @frame_header_cfg : Offset of frame_header_cfg register
+ * @addr_image : Offset of addr_image register
+ * @addr_image_offset : Offset of addr_image_offset register
+ * @buffer_width_cfg : Offset of buffer_width_cfg register
+ * @buffer_height_cfg : Offset of buffer_height_cfg register
+ * @packer_cfg : Offset of packer_cfg register
+ * @wr_stride : Offset of wr_stride register
+ * @irq_subsample_cfg_period : Offset of irq_subsample_cfg_period register
+ * @irq_subsample_cfg_pattern : Offset of irq_subsample_cfg_pattern register
+ * @burst_limit_cfg : Offset of burst_limit_cfg register
+ * @misr_cfg : Offset of misr_cfg register
+ * @misr_rd_word_sel : Offset of misr_rd_word_sel register
+ * @misr_val : Offset of misr_val register
+ * @debug_status_cfg : Offset of debug_status_cfg register
+ * @debug_status_0 : Offset of debug_status_0 register
+ * @debug_status_1 : Offset of debug_status_1 register
+ */
+struct cam_lrme_bus_wr_bus_client {
+ uint32_t status_0;
+ uint32_t status_1;
+ uint32_t cfg;
+ uint32_t addr_frame_header;
+ uint32_t frame_header_cfg;
+ uint32_t addr_image;
+ uint32_t addr_image_offset;
+ uint32_t buffer_width_cfg;
+ uint32_t buffer_height_cfg;
+ uint32_t packer_cfg;
+ uint32_t wr_stride;
+ uint32_t irq_subsample_cfg_period;
+ uint32_t irq_subsample_cfg_pattern;
+ uint32_t burst_limit_cfg;
+ uint32_t misr_cfg;
+ uint32_t misr_rd_word_sel;
+ uint32_t misr_val;
+ uint32_t debug_status_cfg;
+ uint32_t debug_status_0;
+ uint32_t debug_status_1;
+};
+
+/**
+ * struct cam_lrme_bus_rd_hw_info : FE registers information
+ *
+ * @common_reg : FE common register
+ * @bus_client_reg : List of FE bus registers information
+ */
+struct cam_lrme_bus_rd_hw_info {
+ struct cam_lrme_bus_rd_reg_common common_reg;
+ struct cam_lrme_bus_rd_bus_client
+ bus_client_reg[CAM_LRME_BUS_RD_MAX_CLIENTS];
+};
+
+/**
+ * struct cam_lrme_bus_wr_hw_info : WE engine registers information
+ *
+ * @common_reg : WE common register
+ * @bus_client_reg : List of WE bus registers information
+ */
+struct cam_lrme_bus_wr_hw_info {
+ struct cam_lrme_bus_wr_reg_common common_reg;
+ struct cam_lrme_bus_wr_bus_client
+ bus_client_reg[CAM_LRME_BUS_WR_MAX_CLIENTS];
+};
+
+/**
+ * struct cam_lrme_clc_reg : Offset of clc registers
+ *
+ * @clc_hw_version                 : Offset of clc_hw_version register
+ * @clc_hw_status                  : Offset of clc_hw_status register
+ * @clc_hw_status_dbg              : Offset of clc_hw_status_dbg register
+ * @clc_module_cfg                 : Offset of clc_module_cfg register
+ * @clc_moduleformat               : Offset of clc_moduleformat register
+ * @clc_rangestep                  : Offset of clc_rangestep register
+ * @clc_offset                     : Offset of clc_offset register
+ * @clc_maxallowedsad              : Offset of clc_maxallowedsad register
+ * @clc_minallowedtarmad           : Offset of clc_minallowedtarmad register
+ * @clc_meaningfulsaddiff          : Offset of clc_meaningfulsaddiff register
+ * @clc_minsaddiffdenom            : Offset of clc_minsaddiffdenom register
+ * @clc_robustnessmeasuredistmap_0 : Offset of measuredistmap_0 register
+ * @clc_robustnessmeasuredistmap_1 : Offset of measuredistmap_1 register
+ * @clc_robustnessmeasuredistmap_2 : Offset of measuredistmap_2 register
+ * @clc_robustnessmeasuredistmap_3 : Offset of measuredistmap_3 register
+ * @clc_robustnessmeasuredistmap_4 : Offset of measuredistmap_4 register
+ * @clc_robustnessmeasuredistmap_5 : Offset of measuredistmap_5 register
+ * @clc_robustnessmeasuredistmap_6 : Offset of measuredistmap_6 register
+ * @clc_robustnessmeasuredistmap_7 : Offset of measuredistmap_7 register
+ * @clc_ds_crop_horizontal         : Offset of clc_ds_crop_horizontal register
+ * @clc_ds_crop_vertical           : Offset of clc_ds_crop_vertical register
+ * @clc_tar_pd_unpacker            : Offset of clc_tar_pd_unpacker register
+ * @clc_ref_pd_unpacker            : Offset of clc_ref_pd_unpacker register
+ * @clc_sw_override                : Offset of clc_sw_override register
+ * @clc_tar_height                 : Offset of clc_tar_height register
+ * @clc_ref_height                 : Offset of clc_ref_height register
+ * @clc_test_bus_ctrl              : Offset of clc_test_bus_ctrl register
+ * @clc_spare                      : Offset of clc_spare register
+ */
+struct cam_lrme_clc_reg {
+	uint32_t clc_hw_version;
+	uint32_t clc_hw_status;
+	uint32_t clc_hw_status_dbg;
+	uint32_t clc_module_cfg;
+	uint32_t clc_moduleformat;
+	uint32_t clc_rangestep;
+	uint32_t clc_offset;
+	uint32_t clc_maxallowedsad;
+	uint32_t clc_minallowedtarmad;
+	uint32_t clc_meaningfulsaddiff;
+	uint32_t clc_minsaddiffdenom;
+	uint32_t clc_robustnessmeasuredistmap_0;
+	uint32_t clc_robustnessmeasuredistmap_1;
+	uint32_t clc_robustnessmeasuredistmap_2;
+	uint32_t clc_robustnessmeasuredistmap_3;
+	uint32_t clc_robustnessmeasuredistmap_4;
+	uint32_t clc_robustnessmeasuredistmap_5;
+	uint32_t clc_robustnessmeasuredistmap_6;
+	uint32_t clc_robustnessmeasuredistmap_7;
+	uint32_t clc_ds_crop_horizontal;
+	uint32_t clc_ds_crop_vertical;
+	uint32_t clc_tar_pd_unpacker;
+	uint32_t clc_ref_pd_unpacker;
+	uint32_t clc_sw_override;
+	uint32_t clc_tar_height;
+	uint32_t clc_ref_height;
+	uint32_t clc_test_bus_ctrl;
+	uint32_t clc_spare;
+};
+
+/**
+ * struct cam_lrme_titan_reg : Offset of LRME top registers
+ *
+ * @top_hw_version : Offset of top_hw_version register
+ * @top_titan_version : Offset of top_titan_version register
+ * @top_rst_cmd : Offset of top_rst_cmd register
+ * @top_core_clk_cfg : Offset of top_core_clk_cfg register
+ * @top_irq_status : Offset of top_irq_status register
+ * @top_irq_mask : Offset of top_irq_mask register
+ * @top_irq_clear : Offset of top_irq_clear register
+ * @top_irq_set : Offset of top_irq_set register
+ * @top_irq_cmd : Offset of top_irq_cmd register
+ * @top_violation_status : Offset of top_violation_status register
+ * @top_spare : Offset of top_spare register
+ */
+struct cam_lrme_titan_reg {
+ uint32_t top_hw_version;
+ uint32_t top_titan_version;
+ uint32_t top_rst_cmd;
+ uint32_t top_core_clk_cfg;
+ uint32_t top_irq_status;
+ uint32_t top_irq_mask;
+ uint32_t top_irq_clear;
+ uint32_t top_irq_set;
+ uint32_t top_irq_cmd;
+ uint32_t top_violation_status;
+ uint32_t top_spare;
+};
+
+/**
+ * struct cam_lrme_hw_info : LRME registers information
+ *
+ * @clc_reg    : LRME CLC registers
+ * @bus_rd_reg : LRME FE registers
+ * @bus_wr_reg : LRME WE registers
+ * @titan_reg  : LRME top registers
+ */
+struct cam_lrme_hw_info {
+	struct cam_lrme_clc_reg clc_reg;
+	struct cam_lrme_bus_rd_hw_info bus_rd_reg;
+	struct cam_lrme_bus_wr_hw_info bus_wr_reg;
+	struct cam_lrme_titan_reg titan_reg;
+};
+
+int cam_lrme_hw_process_irq(void *priv, void *data);
+int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
+ uint32_t arg_size);
+int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size);
+int cam_lrme_hw_stop(void *hw_priv, void *stop_args, uint32_t arg_size);
+int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
+ uint32_t arg_size);
+irqreturn_t cam_lrme_hw_irq(int irq_num, void *data);
+int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size);
+int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw,
+ struct cam_lrme_dev_cap *hw_caps);
+int cam_lrme_hw_start(void *hw_priv, void *hw_init_args, uint32_t arg_size);
+int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size);
+void cam_lrme_set_irq(struct cam_hw_info *lrme_hw, enum cam_lrme_irq_set set);
+
+#endif /* _CAM_LRME_HW_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
new file mode 100644
index 000000000000..ec4297822fb7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
@@ -0,0 +1,320 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_lrme_hw_reg.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_lrme_hw_mgr.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+
+static int cam_lrme_hw_dev_util_cdm_acquire(struct cam_lrme_core *lrme_core,
+ struct cam_hw_info *lrme_hw)
+{
+ int rc, i;
+ struct cam_cdm_bl_request *cdm_cmd;
+ struct cam_cdm_acquire_data cdm_acquire;
+ struct cam_lrme_cdm_info *hw_cdm_info;
+
+ hw_cdm_info = kzalloc(sizeof(struct cam_lrme_cdm_info),
+ GFP_KERNEL);
+ if (!hw_cdm_info) {
+ CAM_ERR(CAM_LRME, "No memory for hw_cdm_info");
+ return -ENOMEM;
+ }
+
+ cdm_cmd = kzalloc((sizeof(struct cam_cdm_bl_request) +
+ ((CAM_LRME_MAX_HW_ENTRIES - 1) *
+ sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+ if (!cdm_cmd) {
+ CAM_ERR(CAM_LRME, "No memory for cdm_cmd");
+ kfree(hw_cdm_info);
+ return -ENOMEM;
+ }
+
+ memset(&cdm_acquire, 0, sizeof(cdm_acquire));
+ strlcpy(cdm_acquire.identifier, "lrmecdm", sizeof("lrmecdm"));
+ cdm_acquire.cell_index = lrme_hw->soc_info.index;
+ cdm_acquire.handle = 0;
+ cdm_acquire.userdata = hw_cdm_info;
+ cdm_acquire.cam_cdm_callback = NULL;
+ cdm_acquire.id = CAM_CDM_VIRTUAL;
+ cdm_acquire.base_array_cnt = lrme_hw->soc_info.num_reg_map;
+ for (i = 0; i < lrme_hw->soc_info.num_reg_map; i++)
+ cdm_acquire.base_array[i] = &lrme_hw->soc_info.reg_map[i];
+
+ rc = cam_cdm_acquire(&cdm_acquire);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Can't acquire cdm");
+ goto error;
+ }
+
+ hw_cdm_info->cdm_cmd = cdm_cmd;
+ hw_cdm_info->cdm_ops = cdm_acquire.ops;
+ hw_cdm_info->cdm_handle = cdm_acquire.handle;
+
+ lrme_core->hw_cdm_info = hw_cdm_info;
+ CAM_DBG(CAM_LRME, "cdm acquire done");
+
+ return 0;
+error:
+ kfree(cdm_cmd);
+ kfree(hw_cdm_info);
+ return rc;
+}
+
+static int cam_lrme_hw_dev_probe(struct platform_device *pdev)
+{
+ struct cam_hw_info *lrme_hw;
+ struct cam_hw_intf lrme_hw_intf;
+ struct cam_lrme_core *lrme_core;
+ const struct of_device_id *match_dev = NULL;
+ struct cam_lrme_hw_info *hw_info;
+ int rc, i;
+
+ lrme_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!lrme_hw) {
+ CAM_ERR(CAM_LRME, "No memory to create lrme_hw");
+ return -ENOMEM;
+ }
+
+ lrme_core = kzalloc(sizeof(struct cam_lrme_core), GFP_KERNEL);
+ if (!lrme_core) {
+ CAM_ERR(CAM_LRME, "No memory to create lrme_core");
+ kfree(lrme_hw);
+ return -ENOMEM;
+ }
+
+ lrme_hw->core_info = lrme_core;
+ lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+ lrme_hw->soc_info.pdev = pdev;
+ lrme_hw->soc_info.dev = &pdev->dev;
+ lrme_hw->soc_info.dev_name = pdev->name;
+ lrme_hw->open_count = 0;
+ lrme_core->state = CAM_LRME_CORE_STATE_INIT;
+
+ mutex_init(&lrme_hw->hw_mutex);
+ spin_lock_init(&lrme_hw->hw_lock);
+ init_completion(&lrme_hw->hw_complete);
+ init_completion(&lrme_core->reset_complete);
+
+ rc = cam_req_mgr_workq_create("cam_lrme_hw_worker",
+ CAM_LRME_HW_WORKQ_NUM_TASK,
+ &lrme_core->work, CRM_WORKQ_USAGE_IRQ, 0);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Unable to create a workq, rc=%d", rc);
+ goto free_memory;
+ }
+
+ for (i = 0; i < CAM_LRME_HW_WORKQ_NUM_TASK; i++)
+ lrme_core->work->task.pool[i].payload =
+ &lrme_core->work_data[i];
+
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev || !match_dev->data) {
+ CAM_ERR(CAM_LRME, "No Of_match data, %pK", match_dev);
+ rc = -EINVAL;
+ goto destroy_workqueue;
+ }
+ hw_info = (struct cam_lrme_hw_info *)match_dev->data;
+ lrme_core->hw_info = hw_info;
+
+ rc = cam_lrme_soc_init_resources(&lrme_hw->soc_info,
+ cam_lrme_hw_irq, lrme_hw);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to init soc, rc=%d", rc);
+ goto destroy_workqueue;
+ }
+
+ rc = cam_lrme_hw_dev_util_cdm_acquire(lrme_core, lrme_hw);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to acquire cdm");
+ goto deinit_platform_res;
+ }
+
+ rc = cam_smmu_get_handle("lrme", &lrme_core->device_iommu.non_secure);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Get iommu handle failed");
+ goto release_cdm;
+ }
+
+ rc = cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_ATTACH);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME attach iommu handle failed, rc=%d", rc);
+ goto destroy_smmu;
+ }
+
+ rc = cam_lrme_hw_start(lrme_hw, NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to hw init, rc=%d", rc);
+ goto detach_smmu;
+ }
+
+ rc = cam_lrme_hw_util_get_caps(lrme_hw, &lrme_core->hw_caps);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to get hw caps, rc=%d", rc);
+ if (cam_lrme_hw_stop(lrme_hw, NULL, 0))
+ CAM_ERR(CAM_LRME, "Failed in hw deinit");
+ goto detach_smmu;
+ }
+
+ rc = cam_lrme_hw_stop(lrme_hw, NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to deinit hw, rc=%d", rc);
+ goto detach_smmu;
+ }
+
+ lrme_core->hw_idx = lrme_hw->soc_info.index;
+ lrme_hw_intf.hw_priv = lrme_hw;
+ lrme_hw_intf.hw_idx = lrme_hw->soc_info.index;
+ lrme_hw_intf.hw_ops.get_hw_caps = cam_lrme_hw_get_caps;
+ lrme_hw_intf.hw_ops.init = NULL;
+ lrme_hw_intf.hw_ops.deinit = NULL;
+ lrme_hw_intf.hw_ops.reset = cam_lrme_hw_reset;
+ lrme_hw_intf.hw_ops.reserve = NULL;
+ lrme_hw_intf.hw_ops.release = NULL;
+ lrme_hw_intf.hw_ops.start = cam_lrme_hw_start;
+ lrme_hw_intf.hw_ops.stop = cam_lrme_hw_stop;
+ lrme_hw_intf.hw_ops.read = NULL;
+ lrme_hw_intf.hw_ops.write = NULL;
+ lrme_hw_intf.hw_ops.process_cmd = cam_lrme_hw_process_cmd;
+ lrme_hw_intf.hw_ops.flush = cam_lrme_hw_flush;
+ lrme_hw_intf.hw_type = CAM_HW_LRME;
+
+ rc = cam_cdm_get_iommu_handle("lrmecdm", &lrme_core->cdm_iommu);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to acquire the CDM iommu handles");
+ goto detach_smmu;
+ }
+
+ rc = cam_lrme_mgr_register_device(&lrme_hw_intf,
+ &lrme_core->device_iommu,
+ &lrme_core->cdm_iommu);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to register device");
+ goto detach_smmu;
+ }
+
+ platform_set_drvdata(pdev, lrme_hw);
+ CAM_DBG(CAM_LRME, "LRME-%d probe successful", lrme_hw_intf.hw_idx);
+
+ return rc;
+
+detach_smmu:
+ cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_DETACH);
+destroy_smmu:
+ cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure);
+release_cdm:
+ cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle);
+ kfree(lrme_core->hw_cdm_info->cdm_cmd);
+ kfree(lrme_core->hw_cdm_info);
+deinit_platform_res:
+ if (cam_lrme_soc_deinit_resources(&lrme_hw->soc_info))
+ CAM_ERR(CAM_LRME, "Failed in soc deinit");
+ mutex_destroy(&lrme_hw->hw_mutex);
+destroy_workqueue:
+ cam_req_mgr_workq_destroy(&lrme_core->work);
+free_memory:
+ mutex_destroy(&lrme_hw->hw_mutex);
+ kfree(lrme_hw);
+ kfree(lrme_core);
+
+ return rc;
+}
+
+static int cam_lrme_hw_dev_remove(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct cam_hw_info *lrme_hw;
+ struct cam_lrme_core *lrme_core;
+
+ lrme_hw = platform_get_drvdata(pdev);
+ if (!lrme_hw) {
+ CAM_ERR(CAM_LRME, "Invalid lrme_hw from fd_hw_intf");
+ rc = -ENODEV;
+ goto deinit_platform_res;
+ }
+
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+ if (!lrme_core) {
+ CAM_ERR(CAM_LRME, "Invalid lrme_core from fd_hw");
+ rc = -EINVAL;
+ goto deinit_platform_res;
+ }
+
+ cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_DETACH);
+ cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure);
+ cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle);
+ cam_lrme_mgr_deregister_device(lrme_core->hw_idx);
+
+ kfree(lrme_core->hw_cdm_info->cdm_cmd);
+ kfree(lrme_core->hw_cdm_info);
+ kfree(lrme_core);
+
+deinit_platform_res:
+ rc = cam_lrme_soc_deinit_resources(&lrme_hw->soc_info);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Error in LRME soc deinit, rc=%d", rc);
+
+ mutex_destroy(&lrme_hw->hw_mutex);
+ kfree(lrme_hw);
+
+ return rc;
+}
+
+static const struct of_device_id cam_lrme_hw_dt_match[] = {
+ {
+ .compatible = "qcom,lrme",
+ .data = &cam_lrme10_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, cam_lrme_hw_dt_match);
+
+static struct platform_driver cam_lrme_hw_driver = {
+ .probe = cam_lrme_hw_dev_probe,
+ .remove = cam_lrme_hw_dev_remove,
+ .driver = {
+ .name = "cam_lrme_hw",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_lrme_hw_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cam_lrme_hw_init_module(void)
+{
+ return platform_driver_register(&cam_lrme_hw_driver);
+}
+
+static void __exit cam_lrme_hw_exit_module(void)
+{
+ platform_driver_unregister(&cam_lrme_hw_driver);
+}
+
+module_init(cam_lrme_hw_init_module);
+module_exit(cam_lrme_hw_exit_module);
+MODULE_DESCRIPTION("CAM LRME HW driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
new file mode 100644
index 000000000000..4cd643f0413a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_INTF_H_
+#define _CAM_LRME_HW_INTF_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_lrme.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_subdev.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+
+
+#define CAM_LRME_MAX_IO_BUFFER 2
+#define CAM_LRME_MAX_HW_ENTRIES 5
+
+#define CAM_LRME_BASE_IDX 0
+
+/**
+ * enum cam_lrme_hw_type : Enum for LRME HW type
+ *
+ * @CAM_HW_LRME : LRME HW type
+ */
+enum cam_lrme_hw_type {
+ CAM_HW_LRME,
+};
+
+/**
+ * enum cam_lrme_cb_type : HW manager call back type
+ *
+ * @CAM_LRME_CB_BUF_DONE : Indicate buf done has been generated
+ * @CAM_LRME_CB_COMP_REG_UPDATE : Indicate receiving WE comp reg update
+ * @CAM_LRME_CB_PUT_FRAME : Request HW manager to put back the frame
+ * @CAM_LRME_CB_ERROR : Indicate error irq has been generated
+ */
+enum cam_lrme_cb_type {
+ CAM_LRME_CB_BUF_DONE = 1,
+ CAM_LRME_CB_COMP_REG_UPDATE = 1 << 1,
+ CAM_LRME_CB_PUT_FRAME = 1 << 2,
+ CAM_LRME_CB_ERROR = 1 << 3,
+};
+
+/**
+ * enum cam_lrme_hw_cmd_type : HW CMD type
+ *
+ * @CAM_LRME_HW_CMD_PREPARE_HW_UPDATE : Prepare HW update
+ * @CAM_LRME_HW_CMD_REGISTER_CB : register HW manager callback
+ * @CAM_LRME_HW_CMD_SUBMIT : Submit frame to HW
+ * @CAM_LRME_HW_CMD_DUMP_REGISTER : dump register values
+ */
+enum cam_lrme_hw_cmd_type {
+ CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
+ CAM_LRME_HW_CMD_REGISTER_CB,
+ CAM_LRME_HW_CMD_SUBMIT,
+ CAM_LRME_HW_CMD_DUMP_REGISTER,
+};
+
+/**
+ * enum cam_lrme_hw_reset_type : Type of reset
+ *
+ * @CAM_LRME_HW_RESET_TYPE_HW_RESET : HW reset
+ * @CAM_LRME_HW_RESET_TYPE_SW_RESET : SW reset
+ */
+enum cam_lrme_hw_reset_type {
+ CAM_LRME_HW_RESET_TYPE_HW_RESET,
+ CAM_LRME_HW_RESET_TYPE_SW_RESET,
+};
+
+/**
+ * struct cam_lrme_frame_request : LRME frame request
+ *
+ * @frame_list : List head
+ * @req_id : Request ID
+ * @ctxt_to_hw_map : Information about context id, priority and device id
+ * @hw_device : Pointer to HW device
+ * @hw_update_entries : List of hw_update_entries
+ * @num_hw_update_entries : number of hw_update_entries
+ */
+struct cam_lrme_frame_request {
+ struct list_head frame_list;
+ uint64_t req_id;
+ void *ctxt_to_hw_map;
+ struct cam_lrme_device *hw_device;
+ struct cam_hw_update_entry hw_update_entries[CAM_LRME_MAX_HW_ENTRIES];
+ uint32_t num_hw_update_entries;
+};
+
+/**
+ * struct cam_lrme_hw_io_buffer : IO buffer information
+ *
+ * @valid : Indicate whether this IO config is valid
+ * @io_cfg : Pointer to IO configuration
+ * @num_buf : Number of buffers
+ * @num_plane : Number of planes
+ * @io_addr : List of IO address
+ */
+struct cam_lrme_hw_io_buffer {
+ bool valid;
+ struct cam_buf_io_cfg *io_cfg;
+ uint32_t num_buf;
+ uint32_t num_plane;
+ uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+};
+
+/**
+ * struct cam_lrme_hw_cmd_config_args : Args for prepare HW update
+ *
+ * @hw_device : Pointer to HW device
+ * @input_buf : List of input buffers
+ * @output_buf : List of output buffers
+ * @cmd_buf_addr : Pointer to available KMD buffer
+ * @size : Available KMD buffer size
+ * @config_buf_size : Size used to prepare update
+ */
+struct cam_lrme_hw_cmd_config_args {
+ struct cam_lrme_device *hw_device;
+ struct cam_lrme_hw_io_buffer input_buf[CAM_LRME_MAX_IO_BUFFER];
+ struct cam_lrme_hw_io_buffer output_buf[CAM_LRME_MAX_IO_BUFFER];
+ uint32_t *cmd_buf_addr;
+ uint32_t size;
+ uint32_t config_buf_size;
+};
+
+/**
+ * struct cam_lrme_hw_flush_args : Args for flush HW
+ *
+ * @ctxt_to_hw_map : Identity of context
+ * @req_to_flush : Pointer to the frame need to flush in
+ * case of single frame flush
+ * @flush_type : Flush type
+ */
+struct cam_lrme_hw_flush_args {
+ void *ctxt_to_hw_map;
+ struct cam_lrme_frame_request *req_to_flush;
+ uint32_t flush_type;
+};
+
+/**
+ * struct cam_lrme_hw_reset_args : Args for reset HW
+ *
+ * @reset_type : Enum cam_lrme_hw_reset_type
+ */
+struct cam_lrme_hw_reset_args {
+ uint32_t reset_type;
+};
+
+/**
+ * struct cam_lrme_hw_cb_args : HW manager callback args
+ *
+ * @cb_type : Callback event type
+ * @frame_req : Pointer to the frame associated with the cb
+ */
+struct cam_lrme_hw_cb_args {
+ uint32_t cb_type;
+ struct cam_lrme_frame_request *frame_req;
+};
+
+/**
+ * struct cam_lrme_hw_cmd_set_cb : Args for set callback function
+ *
+ * @cam_lrme_hw_mgr_cb : Callback function pointer
+ * @data : Data sent along with callback function
+ */
+struct cam_lrme_hw_cmd_set_cb {
+ int (*cam_lrme_hw_mgr_cb)(void *data,
+ struct cam_lrme_hw_cb_args *args);
+ void *data;
+};
+
+/**
+ * struct cam_lrme_hw_submit_args : Args for submit request
+ *
+ * @hw_update_entries : List of hw update entries used to program registers
+ * @num_hw_update_entries : Number of hw update entries
+ * @frame_req : Pointer to the frame request
+ */
+struct cam_lrme_hw_submit_args {
+ struct cam_hw_update_entry *hw_update_entries;
+ uint32_t num_hw_update_entries;
+ struct cam_lrme_frame_request *frame_req;
+};
+
+#endif /* _CAM_LRME_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h
new file mode 100644
index 000000000000..7669b6ff3f5c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h
@@ -0,0 +1,193 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_REG_H_
+#define _CAM_LRME_HW_REG_H_
+
+#include "cam_lrme_hw_core.h"
+
+static struct cam_lrme_hw_info cam_lrme10_hw_info = {
+ .clc_reg = {
+ .clc_hw_version = 0x00000000,
+ .clc_hw_status = 0x00000004,
+ .clc_hw_status_dbg = 0x00000008,
+ .clc_module_cfg = 0x00000060,
+ .clc_moduleformat = 0x000000A8,
+ .clc_rangestep = 0x00000068,
+ .clc_offset = 0x0000006C,
+ .clc_maxallowedsad = 0x00000070,
+ .clc_minallowedtarmad = 0x00000074,
+ .clc_meaningfulsaddiff = 0x00000078,
+ .clc_minsaddiffdenom = 0x0000007C,
+ .clc_robustnessmeasuredistmap_0 = 0x00000080,
+ .clc_robustnessmeasuredistmap_1 = 0x00000084,
+ .clc_robustnessmeasuredistmap_2 = 0x00000088,
+ .clc_robustnessmeasuredistmap_3 = 0x0000008C,
+ .clc_robustnessmeasuredistmap_4 = 0x00000090,
+ .clc_robustnessmeasuredistmap_5 = 0x00000094,
+ .clc_robustnessmeasuredistmap_6 = 0x00000098,
+ .clc_robustnessmeasuredistmap_7 = 0x0000009C,
+ .clc_ds_crop_horizontal = 0x000000A0,
+ .clc_ds_crop_vertical = 0x000000A4,
+ .clc_tar_pd_unpacker = 0x000000AC,
+ .clc_ref_pd_unpacker = 0x000000B0,
+ .clc_sw_override = 0x000000B4,
+ .clc_tar_height = 0x000000B8,
+ .clc_ref_height = 0x000000BC,
+ .clc_test_bus_ctrl = 0x000001F8,
+ .clc_spare = 0x000001FC,
+ },
+ .bus_rd_reg = {
+ .common_reg = {
+ .hw_version = 0x00000200,
+ .hw_capability = 0x00000204,
+ .sw_reset = 0x00000208,
+ .cgc_override = 0x0000020C,
+ .irq_mask = 0x00000210,
+ .irq_clear = 0x00000214,
+ .irq_cmd = 0x00000218,
+ .irq_status = 0x0000021C,
+ .cmd = 0x00000220,
+ .irq_set = 0x00000224,
+ .misr_reset = 0x0000022C,
+ .security_cfg = 0x00000230,
+ .pwr_iso_cfg = 0x00000234,
+ .pwr_iso_seed = 0x00000238,
+ .test_bus_ctrl = 0x00000248,
+ .spare = 0x0000024C,
+ },
+ .bus_client_reg = {
+ /* bus client 0 */
+ {
+ .core_cfg = 0x00000250,
+ .ccif_meta_data = 0x00000254,
+ .addr_image = 0x00000258,
+ .rd_buffer_size = 0x0000025C,
+ .rd_stride = 0x00000260,
+ .unpack_cfg_0 = 0x00000264,
+ .latency_buff_allocation = 0x00000278,
+ .burst_limit_cfg = 0x00000280,
+ .misr_cfg_0 = 0x00000284,
+ .misr_cfg_1 = 0x00000288,
+ .misr_rd_val = 0x0000028C,
+ .debug_status_cfg = 0x00000290,
+ .debug_status_0 = 0x00000294,
+ .debug_status_1 = 0x00000298,
+ },
+ /* bus client 1 */
+ {
+ .core_cfg = 0x000002F0,
+ .ccif_meta_data = 0x000002F4,
+ .addr_image = 0x000002F8,
+ .rd_buffer_size = 0x000002FC,
+ .rd_stride = 0x00000300,
+ .unpack_cfg_0 = 0x00000304,
+ .latency_buff_allocation = 0x00000318,
+ .burst_limit_cfg = 0x00000320,
+ .misr_cfg_0 = 0x00000324,
+ .misr_cfg_1 = 0x00000328,
+ .misr_rd_val = 0x0000032C,
+ .debug_status_cfg = 0x00000330,
+ .debug_status_0 = 0x00000334,
+ .debug_status_1 = 0x00000338,
+ },
+ },
+ },
+ .bus_wr_reg = {
+ .common_reg = {
+ .hw_version = 0x00000500,
+ .hw_capability = 0x00000504,
+ .sw_reset = 0x00000508,
+ .cgc_override = 0x0000050C,
+ .misr_reset = 0x000005C8,
+ .pwr_iso_cfg = 0x000005CC,
+ .test_bus_ctrl = 0x0000061C,
+ .composite_mask_0 = 0x00000510,
+ .irq_mask_0 = 0x00000544,
+ .irq_mask_1 = 0x00000548,
+ .irq_clear_0 = 0x00000550,
+ .irq_clear_1 = 0x00000554,
+ .irq_status_0 = 0x0000055C,
+ .irq_status_1 = 0x00000560,
+ .irq_cmd = 0x00000568,
+ .irq_set_0 = 0x000005BC,
+ .irq_set_1 = 0x000005C0,
+ .addr_fifo_status = 0x000005A8,
+ .frame_header_cfg0 = 0x000005AC,
+ .frame_header_cfg1 = 0x000005B0,
+ .spare = 0x00000620,
+ },
+ .bus_client_reg = {
+ /* bus client 0 */
+ {
+ .status_0 = 0x00000700,
+ .status_1 = 0x00000704,
+ .cfg = 0x00000708,
+ .addr_frame_header = 0x0000070C,
+ .frame_header_cfg = 0x00000710,
+ .addr_image = 0x00000714,
+ .addr_image_offset = 0x00000718,
+ .buffer_width_cfg = 0x0000071C,
+ .buffer_height_cfg = 0x00000720,
+ .packer_cfg = 0x00000724,
+ .wr_stride = 0x00000728,
+ .irq_subsample_cfg_period = 0x00000748,
+ .irq_subsample_cfg_pattern = 0x0000074C,
+ .burst_limit_cfg = 0x0000075C,
+ .misr_cfg = 0x00000760,
+ .misr_rd_word_sel = 0x00000764,
+ .misr_val = 0x00000768,
+ .debug_status_cfg = 0x0000076C,
+ .debug_status_0 = 0x00000770,
+ .debug_status_1 = 0x00000774,
+ },
+ /* bus client 1 */
+ {
+ .status_0 = 0x00000800,
+ .status_1 = 0x00000804,
+ .cfg = 0x00000808,
+ .addr_frame_header = 0x0000080C,
+ .frame_header_cfg = 0x00000810,
+ .addr_image = 0x00000814,
+ .addr_image_offset = 0x00000818,
+ .buffer_width_cfg = 0x0000081C,
+ .buffer_height_cfg = 0x00000820,
+ .packer_cfg = 0x00000824,
+ .wr_stride = 0x00000828,
+ .irq_subsample_cfg_period = 0x00000848,
+ .irq_subsample_cfg_pattern = 0x0000084C,
+ .burst_limit_cfg = 0x0000085C,
+ .misr_cfg = 0x00000860,
+ .misr_rd_word_sel = 0x00000864,
+ .misr_val = 0x00000868,
+ .debug_status_cfg = 0x0000086C,
+ .debug_status_0 = 0x00000870,
+ .debug_status_1 = 0x00000874,
+ },
+ },
+ },
+ .titan_reg = {
+ .top_hw_version = 0x00000900,
+ .top_titan_version = 0x00000904,
+ .top_rst_cmd = 0x00000908,
+ .top_core_clk_cfg = 0x00000920,
+ .top_irq_status = 0x0000090C,
+ .top_irq_mask = 0x00000910,
+ .top_irq_clear = 0x00000914,
+ .top_irq_set = 0x00000918,
+ .top_irq_cmd = 0x0000091C,
+ .top_violation_status = 0x00000924,
+ .top_spare = 0x000009FC,
+ },
+};
+
+#endif /* _CAM_LRME_HW_REG_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c
new file mode 100644
index 000000000000..8c58685696ad
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c
@@ -0,0 +1,158 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+
+
+int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_soc_private *soc_private =
+ (struct cam_lrme_soc_private *)soc_info->soc_private;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+ int rc = 0;
+
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = 7200000;
+ axi_vote.uncompressed_bw = 7200000;
+ rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to start cpas, rc %d", rc);
+ return -EFAULT;
+ }
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE,
+ true);
+ if (rc) {
+ CAM_ERR(CAM_LRME,
+ "Failed to enable platform resource, rc %d", rc);
+ goto stop_cpas;
+ }
+
+ cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_ENABLE);
+
+ return rc;
+
+stop_cpas:
+ if (cam_cpas_stop(soc_private->cpas_handle))
+ CAM_ERR(CAM_LRME, "Failed to stop cpas");
+
+ return rc;
+}
+
+int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_soc_private *soc_private;
+ int rc = 0;
+
+ soc_private = soc_info->soc_private;
+
+ cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_DISABLE);
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to disable platform resource");
+ return rc;
+ }
+ rc = cam_cpas_stop(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed to stop cpas");
+
+ return rc;
+}
+
+int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t irq_handler, void *private_data)
+{
+ struct cam_lrme_soc_private *soc_private;
+ struct cam_cpas_register_params cpas_register_param;
+ int rc;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in get_dt_properties, rc=%d", rc);
+ return rc;
+ }
+
+ rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+ private_data);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in request_platform_resource rc=%d",
+ rc);
+ return rc;
+ }
+
+ soc_private = kzalloc(sizeof(struct cam_lrme_soc_private), GFP_KERNEL);
+ if (!soc_private) {
+ rc = -ENOMEM;
+ goto release_res;
+ }
+ soc_info->soc_private = soc_private;
+
+ memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+ strlcpy(cpas_register_param.identifier,
+ "lrmecpas", CAM_HW_IDENTIFIER_LENGTH);
+ cpas_register_param.cell_index = soc_info->index;
+ cpas_register_param.dev = &soc_info->pdev->dev;
+ cpas_register_param.userdata = private_data;
+ cpas_register_param.cam_cpas_client_cb = NULL;
+
+ rc = cam_cpas_register_client(&cpas_register_param);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "CPAS registration failed");
+ goto free_soc_private;
+ }
+ soc_private->cpas_handle = cpas_register_param.client_handle;
+ CAM_DBG(CAM_LRME, "CPAS handle=%d", soc_private->cpas_handle);
+
+ return rc;
+
+free_soc_private:
+ kfree(soc_info->soc_private);
+ soc_info->soc_private = NULL;
+release_res:
+ cam_soc_util_release_platform_resource(soc_info);
+
+ return rc;
+}
+
+int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+ struct cam_lrme_soc_private *soc_private =
+ (struct cam_lrme_soc_private *)soc_info->soc_private;
+ int rc;
+
+ rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Unregister cpas failed, handle=%d, rc=%d",
+ soc_private->cpas_handle, rc);
+
+ rc = cam_soc_util_release_platform_resource(soc_info);
+ if (rc)
+ CAM_ERR(CAM_LRME, "release platform failed, rc=%d", rc);
+
+ kfree(soc_info->soc_private);
+ soc_info->soc_private = NULL;
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h
new file mode 100644
index 000000000000..ca33c6f14dfb
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_SOC_H_
+#define _CAM_LRME_HW_SOC_H_
+
+#include "cam_soc_util.h"
+
+struct cam_lrme_soc_private {
+ uint32_t cpas_handle;
+};
+
+int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw);
+int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw);
+int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t irq_handler, void *private_data);
+int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_LRME_HW_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/Makefile b/drivers/media/platform/msm/camera_v3/cam_req_mgr/Makefile
new file mode 100644
index 000000000000..5e7cff8d5297
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_core.o\
+ cam_req_mgr_dev.o \
+ cam_req_mgr_util.o \
+ cam_req_mgr_workq.o \
+ cam_mem_mgr.o \
+ cam_req_mgr_timer.o \
+ cam_req_mgr_debug.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr.c
new file mode 100644
index 000000000000..2d17f7b51cd7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr.c
@@ -0,0 +1,1267 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_mem_table tbl;
+
+/* Map an ION buffer into the kernel virtual address space.
+ *
+ * On success *vaddr holds the kernel mapping and *len the buffer length.
+ * Returns 0 on success, -ENOSPC if the mapping or the size query fails.
+ */
+static int cam_mem_util_map_cpu_va(struct ion_handle *hdl,
+	uintptr_t *vaddr,
+	size_t *len)
+{
+	*vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
+	if (IS_ERR_OR_NULL((void *)(uintptr_t)(*vaddr))) {
+		CAM_ERR(CAM_MEM, "kernel map fail");
+		return -ENOSPC;
+	}
+
+	if (ion_handle_get_size(tbl.client, hdl, len)) {
+		CAM_ERR(CAM_MEM, "kernel get len failed");
+		/* Undo the kernel mapping so the handle is left untouched */
+		ion_unmap_kernel(tbl.client, hdl);
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
+/* Translate CAM_MEM_FLAG_* access bits into a DMA data direction.
+ *
+ * Returns a dma_data_direction value, or -EINVAL when no recognized
+ * access flag is present.
+ */
+static int cam_mem_util_get_dma_dir(uint32_t flags)
+{
+	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
+		return DMA_TO_DEVICE;
+	if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
+		return DMA_FROM_DEVICE;
+	if ((flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(flags & CAM_MEM_FLAG_PROTECTED_MODE))
+		return DMA_BIDIRECTIONAL;
+
+	return -EINVAL;
+}
+
+/* Create the global ION client shared by all camera allocations.
+ * Returns 0 on success, -EINVAL on failure.
+ */
+static int cam_mem_util_client_create(void)
+{
+	int rc = 0;
+
+	tbl.client = msm_ion_client_create("camera_global_pool");
+	if (IS_ERR_OR_NULL(tbl.client)) {
+		CAM_ERR(CAM_MEM, "fail to create client");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/* Destroy the global ION client created by cam_mem_util_client_create(). */
+static void cam_mem_util_client_destroy(void)
+{
+	ion_client_destroy(tbl.client);
+	tbl.client = NULL;
+}
+
+/* Initialize the memory manager: create the ION client, allocate the
+ * slot bitmap, and reset every buffer-queue entry.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int cam_mem_mgr_init(void)
+{
+	int rc;
+	int i;
+	int bitmap_size;
+
+	memset(tbl.bufq, 0, sizeof(tbl.bufq));
+
+	rc = cam_mem_util_client_create();
+	if (rc < 0) {
+		CAM_ERR(CAM_MEM, "fail to create ion client");
+		goto client_fail;
+	}
+
+	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
+	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!tbl.bitmap) {
+		rc = -ENOMEM;
+		goto bitmap_fail;
+	}
+	/* tbl.bits is rounded up to whole longs; cam_mem_get_slot() still
+	 * bounds indices by CAM_MEM_BUFQ_MAX.
+	 */
+	tbl.bits = bitmap_size * BITS_PER_BYTE;
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	/* We need to reserve slot 0 because 0 is invalid */
+	set_bit(0, tbl.bitmap);
+
+	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
+		tbl.bufq[i].fd = -1;
+		tbl.bufq[i].buf_handle = -1;
+	}
+	mutex_init(&tbl.m_lock);
+	return rc;
+
+bitmap_fail:
+	cam_mem_util_client_destroy();
+client_fail:
+	return rc;
+}
+
+/* Reserve a free buffer-queue slot under the table lock.
+ *
+ * Marks the slot active and initializes its per-buffer mutex.
+ * Returns the slot index (> 0) or -ENOMEM when the table is full.
+ */
+static int32_t cam_mem_get_slot(void)
+{
+	int32_t idx;
+
+	mutex_lock(&tbl.m_lock);
+	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		mutex_unlock(&tbl.m_lock);
+		return -ENOMEM;
+	}
+
+	set_bit(idx, tbl.bitmap);
+	tbl.bufq[idx].active = true;
+	mutex_init(&tbl.bufq[idx].q_lock);
+	mutex_unlock(&tbl.m_lock);
+
+	return idx;
+}
+
+/* Release a slot taken by cam_mem_get_slot(): deactivate it, destroy its
+ * mutex, and clear its bitmap bit.  Lock order: m_lock then q_lock.
+ */
+static void cam_mem_put_slot(int32_t idx)
+{
+	mutex_lock(&tbl.m_lock);
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].active = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	mutex_destroy(&tbl.bufq[idx].q_lock);
+	clear_bit(idx, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
+}
+
+/* Look up the device (IO) address and length of a buffer on the SMMU
+ * context identified by @mmu_handle.
+ *
+ * Returns 0 on success and fills *iova_ptr / *len_ptr; negative errno
+ * on a stale handle or SMMU lookup failure.
+ */
+int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
+	dma_addr_t *iova_ptr, size_t *len_ptr)
+{
+	int rc = 0, idx;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	/* Check the slot state under q_lock so a concurrent release cannot
+	 * invalidate it between the check and the SMMU lookup (was checked
+	 * before taking the lock).
+	 */
+	if (!tbl.bufq[idx].active) {
+		rc = -EINVAL;
+		goto handle_mismatch;
+	}
+
+	if (buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto handle_mismatch;
+	}
+
+	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
+		rc = cam_smmu_get_stage2_iova(mmu_handle,
+			tbl.bufq[idx].fd,
+			iova_ptr,
+			len_ptr);
+	else
+		rc = cam_smmu_get_iova(mmu_handle,
+			tbl.bufq[idx].fd,
+			iova_ptr,
+			len_ptr);
+	if (rc < 0)
+		CAM_ERR(CAM_MEM, "fail to get buf hdl :%d", buf_handle);
+
+handle_mismatch:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_get_io_buf);
+
+/* Return (mapping on first use) the kernel virtual address of a buffer
+ * allocated with CAM_MEM_FLAG_KMD_ACCESS.
+ *
+ * Returns 0 on success and fills *vaddr_ptr / *len; -EPERM for an
+ * inactive slot, -EINVAL for bad arguments or a handle mismatch.
+ */
+int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
+{
+	int rc = 0;
+	int idx;
+	struct ion_handle *ion_hdl = NULL;
+	uintptr_t kvaddr = 0;
+	size_t klen = 0;
+
+	if (!buf_handle || !vaddr_ptr || !len)
+		return -EINVAL;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	/* Hold q_lock across all state checks, the lazy kernel mapping and
+	 * the output reads; the original locked only around the mapping,
+	 * leaving the active/kmdvaddr checks racy against release.
+	 */
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	if (!tbl.bufq[idx].active) {
+		rc = -EPERM;
+		goto release_mutex;
+	}
+
+	if (buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto release_mutex;
+	}
+
+	if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)) {
+		rc = -EINVAL;
+		goto release_mutex;
+	}
+
+	/* Map lazily on first access; later calls reuse the cached kmdvaddr */
+	if (!tbl.bufq[idx].kmdvaddr) {
+		ion_hdl = tbl.bufq[idx].i_hdl;
+		if (!ion_hdl) {
+			CAM_ERR(CAM_MEM, "Invalid ION handle");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		rc = cam_mem_util_map_cpu_va(ion_hdl,
+			&kvaddr, &klen);
+		if (rc)
+			goto release_mutex;
+
+		tbl.bufq[idx].kmdvaddr = kvaddr;
+	}
+
+	*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
+	*len = tbl.bufq[idx].len;
+
+release_mutex:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_get_cpu_buf);
+
+/* Perform a clean/invalidate/clean+invalidate cache operation on a
+ * managed buffer.  No-op for uncached ION allocations.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
+{
+	int rc = 0, idx;
+	uint32_t ion_cache_ops;
+	unsigned long ion_flag = 0;
+
+	if (!cmd)
+		return -EINVAL;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+
+	if (!tbl.bufq[idx].active) {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	rc = ion_handle_get_flags(tbl.client, tbl.bufq[idx].i_hdl,
+		&ion_flag);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "cache get flags failed %d", rc);
+		goto fail;
+	}
+
+	/* Cache maintenance only applies to cached allocations */
+	if (ION_IS_CACHED(ion_flag)) {
+		switch (cmd->mem_cache_ops) {
+		case CAM_MEM_CLEAN_CACHE:
+			ion_cache_ops = ION_IOC_CLEAN_CACHES;
+			break;
+		case CAM_MEM_INV_CACHE:
+			ion_cache_ops = ION_IOC_INV_CACHES;
+			break;
+		case CAM_MEM_CLEAN_INV_CACHE:
+			ion_cache_ops = ION_IOC_CLEAN_INV_CACHES;
+			break;
+		default:
+			CAM_ERR(CAM_MEM,
+				"invalid cache ops :%d", cmd->mem_cache_ops);
+			rc = -EINVAL;
+			goto fail;
+		}
+
+		/* NOTE(review): passes bufq[idx].vaddr (the IOVA) as the
+		 * vaddr argument — confirm msm_ion_do_cache_op tolerates
+		 * this for handle-based operations.
+		 */
+		rc = msm_ion_do_cache_op(tbl.client,
+			tbl.bufq[idx].i_hdl,
+			(void *)(uintptr_t)tbl.bufq[idx].vaddr,
+			tbl.bufq[idx].len,
+			ion_cache_ops);
+		if (rc)
+			CAM_ERR(CAM_MEM, "cache operation failed %d", rc);
+	}
+fail:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
+
+/* Allocate an ION buffer and export it as a dma_buf (kernel-side use).
+ *
+ * On success *hdl holds the ION handle and *buf the dma_buf; the ION
+ * allocation is freed on export failure.  Returns 0 or negative errno.
+ */
+static int cam_mem_util_get_dma_buf(size_t len,
+	size_t align,
+	unsigned int heap_id_mask,
+	unsigned int flags,
+	struct ion_handle **hdl,
+	struct dma_buf **buf)
+{
+	int rc = 0;
+
+	if (!hdl || !buf) {
+		CAM_ERR(CAM_MEM, "Invalid params");
+		return -EINVAL;
+	}
+
+	*hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
+	if (IS_ERR_OR_NULL(*hdl))
+		return -ENOMEM;
+
+	*buf = ion_share_dma_buf(tbl.client, *hdl);
+	if (IS_ERR_OR_NULL(*buf)) {
+		CAM_ERR(CAM_MEM, "get dma buf fail");
+		rc = -EINVAL;
+		goto get_buf_fail;
+	}
+
+	return rc;
+
+get_buf_fail:
+	ion_free(tbl.client, *hdl);
+	return rc;
+
+}
+
+/* Allocate an ION buffer and export it as a dma_buf fd (userspace use).
+ *
+ * On success *hdl holds the ION handle and *fd the exported fd; the ION
+ * allocation is freed on export failure.  Returns 0 or negative errno.
+ */
+static int cam_mem_util_get_dma_buf_fd(size_t len,
+	size_t align,
+	unsigned int heap_id_mask,
+	unsigned int flags,
+	struct ion_handle **hdl,
+	int *fd)
+{
+	int rc = 0;
+
+	if (!hdl || !fd) {
+		CAM_ERR(CAM_MEM, "Invalid params");
+		return -EINVAL;
+	}
+
+	*hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
+	if (IS_ERR_OR_NULL(*hdl))
+		return -ENOMEM;
+
+	*fd = ion_share_dma_buf_fd(tbl.client, *hdl);
+	if (*fd < 0) {
+		CAM_ERR(CAM_MEM, "get fd fail");
+		rc = -EINVAL;
+		goto get_fd_fail;
+	}
+
+	return rc;
+
+get_fd_fail:
+	ion_free(tbl.client, *hdl);
+	return rc;
+}
+
+/* Pick heap and ION flags from the user's allocation flags, then allocate
+ * and export the buffer as an fd via cam_mem_util_get_dma_buf_fd().
+ *
+ * Secure (PROTECTED_MODE) requests use the secure display heap with the
+ * relevant content-protection flags; everything else uses system/camera
+ * heaps.  Returns 0 or negative errno.
+ */
+static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd,
+	struct ion_handle **hdl,
+	int *fd)
+{
+	uint32_t heap_id;
+	uint32_t ion_flag = 0;
+	int rc;
+
+	if ((cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
+		(cmd->flags & CAM_MEM_FLAG_CDSP_OUTPUT)) {
+		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
+		ion_flag |=
+			ION_FLAG_SECURE | ION_FLAG_CP_CAMERA | ION_FLAG_CP_CDSP;
+	} else if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
+		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
+	} else {
+		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
+			ION_HEAP(ION_CAMERA_HEAP_ID);
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_CACHE)
+		ion_flag |= ION_FLAG_CACHED;
+	else
+		ion_flag &= ~ION_FLAG_CACHED;
+
+	rc = cam_mem_util_get_dma_buf_fd(cmd->len,
+		cmd->align,
+		heap_id,
+		ion_flag,
+		hdl,
+		fd);
+
+	return rc;
+}
+
+
+/* Validate user-supplied allocation flags and MMU handle count.
+ * Returns 0 when acceptable, -EINVAL otherwise.
+ */
+static int cam_mem_util_check_flags(struct cam_mem_mgr_alloc_cmd *cmd)
+{
+	if (!cmd->flags) {
+		CAM_ERR(CAM_MEM, "Invalid flags");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+		CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)",
+			CAM_MEM_MMU_MAX_HANDLE);
+		return -EINVAL;
+	}
+
+	/* A secure buffer may never be mapped into the kernel */
+	if ((cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
+		(cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)) {
+		CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Validate flags for mapping an imported fd; same rules as allocation,
+ * plus shared-access buffers may not be imported this way.
+ * Returns 0 when acceptable, -EINVAL otherwise.
+ */
+static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
+{
+	if (!cmd->flags) {
+		CAM_ERR(CAM_MEM, "Invalid flags");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+		CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)",
+			CAM_MEM_MMU_MAX_HANDLE);
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
+		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed");
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+		CAM_ERR(CAM_MEM,
+			"Shared memory buffers are not allowed to be mapped");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Map @fd for DMA on every SMMU handle in @mmu_hdls.
+ *
+ * Secure (PROTECTED_MODE) buffers go through the stage-2 path; all
+ * others through the user IOVA path into @region.  On any failure every
+ * mapping made so far is rolled back before returning.
+ */
+static int cam_mem_util_map_hw_va(uint32_t flags,
+	int32_t *mmu_hdls,
+	int32_t num_hdls,
+	int fd,
+	dma_addr_t *hw_vaddr,
+	size_t *len,
+	enum cam_smmu_region_id region)
+{
+	int i;
+	int rc = -1;
+	int dir = cam_mem_util_get_dma_dir(flags);
+
+	if (dir < 0) {
+		CAM_ERR(CAM_MEM, "fail to map DMA direction");
+		return dir;
+	}
+
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
+				fd,
+				dir,
+				tbl.client,
+				(ion_phys_addr_t *)hw_vaddr,
+				len);
+
+			if (rc < 0) {
+				CAM_ERR(CAM_MEM,
+					"Failed to securely map to smmu");
+				goto multi_map_fail;
+			}
+		}
+	} else {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_map_user_iova(mmu_hdls[i],
+				fd,
+				dir,
+				(dma_addr_t *)hw_vaddr,
+				len,
+				region);
+
+			if (rc < 0) {
+				CAM_ERR(CAM_MEM, "Failed to map to smmu");
+				goto multi_map_fail;
+			}
+		}
+	}
+
+	return rc;
+multi_map_fail:
+	/* Roll back ALL successful mappings, including index 0 (the old
+	 * "i > 0" bound leaked the first mapping), and unmap from the same
+	 * region that was used when mapping (was hard-coded to REGION_IO).
+	 */
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		for (--i; i >= 0; i--)
+			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
+	else
+		for (--i; i >= 0; i--)
+			cam_smmu_unmap_user_iova(mmu_hdls[i],
+				fd,
+				region);
+	return rc;
+
+}
+
+/* Allocate an ION buffer for userspace and map it on the requested SMMU
+ * context banks.  On success cmd->out carries the buf_handle and fd.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
+{
+	int rc;
+	int32_t idx;
+	struct ion_handle *ion_hdl;
+	int ion_fd;
+	dma_addr_t hw_vaddr = 0;
+	size_t len;
+
+	if (!cmd) {
+		CAM_ERR(CAM_MEM, " Invalid argument");
+		return -EINVAL;
+	}
+	len = cmd->len;
+
+	rc = cam_mem_util_check_flags(cmd);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "Invalid flags: flags = %X", cmd->flags);
+		return rc;
+	}
+
+	rc = cam_mem_util_ion_alloc(cmd,
+		&ion_hdl,
+		&ion_fd);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "Ion allocation failed");
+		return rc;
+	}
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		CAM_ERR(CAM_MEM, "Failed to get slot");
+		rc = -ENOMEM;
+		goto slot_fail;
+	}
+
+	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
+		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
+
+		/* Default to the IO region so the variable is never read
+		 * uninitialized (fix): with only PROTECTED_MODE set neither
+		 * branch below would assign it.  The stage-2 path taken for
+		 * protected buffers does not consume the region argument.
+		 */
+		enum cam_smmu_region_id region = CAM_SMMU_REGION_IO;
+
+		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+			region = CAM_SMMU_REGION_SHARED;
+
+		rc = cam_mem_util_map_hw_va(cmd->flags,
+			cmd->mmu_hdls,
+			cmd->num_hdl,
+			ion_fd,
+			&hw_vaddr,
+			&len,
+			region);
+		if (rc)
+			goto map_hw_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].fd = ion_fd;
+	tbl.bufq[idx].dma_buf = NULL;
+	tbl.bufq[idx].flags = cmd->flags;
+	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, ion_fd);
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
+	tbl.bufq[idx].kmdvaddr = 0;
+
+	if (cmd->num_hdl > 0)
+		tbl.bufq[idx].vaddr = hw_vaddr;
+	else
+		tbl.bufq[idx].vaddr = 0;
+
+	tbl.bufq[idx].i_hdl = ion_hdl;
+	tbl.bufq[idx].len = cmd->len;
+	tbl.bufq[idx].num_hdl = cmd->num_hdl;
+	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
+		sizeof(int32_t) * cmd->num_hdl);
+	tbl.bufq[idx].is_imported = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
+	cmd->out.fd = tbl.bufq[idx].fd;
+	cmd->out.vaddr = 0;
+
+	CAM_DBG(CAM_MEM, "buf handle: %x, fd: %d, len: %zu",
+		cmd->out.buf_handle, cmd->out.fd,
+		tbl.bufq[idx].len);
+
+	return rc;
+
+map_hw_fail:
+	cam_mem_put_slot(idx);
+slot_fail:
+	ion_free(tbl.client, ion_hdl);
+	return rc;
+}
+
+/* Import a userspace dma_buf fd into the manager and optionally map it
+ * on the requested SMMU handles (IO region only).
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
+{
+	int32_t idx;
+	int rc;
+	struct ion_handle *ion_hdl;
+	dma_addr_t hw_vaddr = 0;
+	size_t len = 0;
+
+	if (!cmd || (cmd->fd < 0)) {
+		CAM_ERR(CAM_MEM, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE)
+		return -EINVAL;
+
+	rc = cam_mem_util_check_map_flags(cmd);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "Invalid flags: flags = %X", cmd->flags);
+		return rc;
+	}
+
+	ion_hdl = ion_import_dma_buf_fd(tbl.client, cmd->fd);
+	if (IS_ERR_OR_NULL((void *)(ion_hdl))) {
+		CAM_ERR(CAM_MEM, "Failed to import ion fd");
+		return -EINVAL;
+	}
+
+	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
+		rc = cam_mem_util_map_hw_va(cmd->flags,
+			cmd->mmu_hdls,
+			cmd->num_hdl,
+			cmd->fd,
+			&hw_vaddr,
+			&len,
+			CAM_SMMU_REGION_IO);
+		if (rc)
+			goto map_fail;
+	} else {
+		rc = ion_handle_get_size(tbl.client, ion_hdl, &len);
+		if (rc)
+			/* Was a bare "return rc", leaking the imported
+			 * ion_hdl; release it like the other error paths.
+			 */
+			goto map_fail;
+	}
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto map_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].fd = cmd->fd;
+	tbl.bufq[idx].dma_buf = NULL;
+	tbl.bufq[idx].flags = cmd->flags;
+	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
+	tbl.bufq[idx].kmdvaddr = 0;
+
+	if (cmd->num_hdl > 0)
+		tbl.bufq[idx].vaddr = hw_vaddr;
+	else
+		tbl.bufq[idx].vaddr = 0;
+
+	tbl.bufq[idx].i_hdl = ion_hdl;
+	tbl.bufq[idx].len = len;
+	tbl.bufq[idx].num_hdl = cmd->num_hdl;
+	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
+		sizeof(int32_t) * cmd->num_hdl);
+	tbl.bufq[idx].is_imported = true;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
+	cmd->out.vaddr = 0;
+
+	return rc;
+
+map_fail:
+	ion_free(tbl.client, ion_hdl);
+	return rc;
+}
+
+/* Unmap the buffer at slot @idx from every SMMU handle it was mapped on.
+ *
+ * @client selects the user (fd-based) or kernel (dma_buf-based) unmap
+ * path.  Stops and returns on the first failure, which leaves any
+ * remaining handles still mapped — callers treat this as fatal.
+ */
+static int cam_mem_util_unmap_hw_va(int32_t idx,
+	enum cam_smmu_region_id region,
+	enum cam_smmu_mapping_client client)
+{
+	int i;
+	uint32_t flags;
+	int32_t *mmu_hdls;
+	int num_hdls;
+	int fd;
+	int rc = -EINVAL;
+
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "Incorrect index");
+		return rc;
+	}
+
+	flags = tbl.bufq[idx].flags;
+	mmu_hdls = tbl.bufq[idx].hdls;
+	num_hdls = tbl.bufq[idx].num_hdl;
+	fd = tbl.bufq[idx].fd;
+
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
+			if (rc < 0)
+				goto unmap_end;
+		}
+	} else {
+		for (i = 0; i < num_hdls; i++) {
+			if (client == CAM_SMMU_MAPPING_USER) {
+				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
+					fd, region);
+			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
+				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
+					tbl.bufq[idx].dma_buf, region);
+			} else {
+				CAM_ERR(CAM_MEM,
+					"invalid caller for unmapping : %d",
+					client);
+				rc = -EINVAL;
+			}
+			if (rc < 0)
+				goto unmap_end;
+		}
+	}
+
+	return rc;
+
+unmap_end:
+	CAM_ERR(CAM_MEM, "unmapping failed");
+	return rc;
+}
+
+/* Force-unmap a still-active user buffer during table cleanup, choosing
+ * the region from the buffer's flags (SHARED takes precedence over IO).
+ */
+static void cam_mem_mgr_unmap_active_buf(int idx)
+{
+	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+		region = CAM_SMMU_REGION_SHARED;
+	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
+		region = CAM_SMMU_REGION_IO;
+
+	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);
+}
+
+/* Tear down every remaining (leaked) buffer in the table: unmap active
+ * entries, free their ION handles, reset the slots, and re-reserve
+ * slot 0 in the bitmap.  Always returns 0.
+ */
+static int cam_mem_mgr_cleanup_table(void)
+{
+	int i;
+
+	mutex_lock(&tbl.m_lock);
+	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
+		if (!tbl.bufq[i].active) {
+			CAM_DBG(CAM_MEM,
+				"Buffer inactive at idx=%d, continuing", i);
+			continue;
+		} else {
+			CAM_DBG(CAM_MEM,
+				"Active buffer at idx=%d, possible leak needs unmapping",
+				i);
+			cam_mem_mgr_unmap_active_buf(i);
+		}
+
+		mutex_lock(&tbl.bufq[i].q_lock);
+		if (tbl.bufq[i].i_hdl) {
+			ion_free(tbl.client, tbl.bufq[i].i_hdl);
+			tbl.bufq[i].i_hdl = NULL;
+		}
+		tbl.bufq[i].fd = -1;
+		tbl.bufq[i].flags = 0;
+		tbl.bufq[i].buf_handle = -1;
+		tbl.bufq[i].vaddr = 0;
+		tbl.bufq[i].len = 0;
+		memset(tbl.bufq[i].hdls, 0,
+			sizeof(int32_t) * tbl.bufq[i].num_hdl);
+		tbl.bufq[i].num_hdl = 0;
+		tbl.bufq[i].i_hdl = NULL;
+		tbl.bufq[i].active = false;
+		mutex_unlock(&tbl.bufq[i].q_lock);
+		mutex_destroy(&tbl.bufq[i].q_lock);
+	}
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	/* We need to reserve slot 0 because 0 is invalid */
+	set_bit(0, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
+
+	return 0;
+}
+
+/* Tear down the memory manager: release all remaining buffers, free the
+ * slot bitmap, and destroy the global ION client.  Counterpart of
+ * cam_mem_mgr_init().
+ */
+void cam_mem_mgr_deinit(void)
+{
+	cam_mem_mgr_cleanup_table();
+	mutex_lock(&tbl.m_lock);
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	kfree(tbl.bitmap);
+	tbl.bitmap = NULL;
+	cam_mem_util_client_destroy();
+	mutex_unlock(&tbl.m_lock);
+	mutex_destroy(&tbl.m_lock);
+}
+
+/* Fully release the buffer at slot @idx: kernel unmap (if any), SMMU
+ * unmap, ION free, and slot reset.  Lock order: m_lock, then q_lock.
+ *
+ * Returns 0 on success (including the already-unmapped case) or the
+ * SMMU unmap error.
+ */
+static int cam_mem_util_unmap(int32_t idx,
+	enum cam_smmu_mapping_client client)
+{
+	int rc = 0;
+	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
+
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "Incorrect index");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);
+
+	mutex_lock(&tbl.m_lock);
+	/* Idempotent: a slot that is inactive with no IOVA was already torn
+	 * down by a previous call.
+	 */
+	if ((!tbl.bufq[idx].active) &&
+		(tbl.bufq[idx].vaddr) == 0) {
+		CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped,",
+			idx);
+		mutex_unlock(&tbl.m_lock);
+		return 0;
+	}
+
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
+		if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
+			ion_unmap_kernel(tbl.client, tbl.bufq[idx].i_hdl);
+
+	/* SHARED flag gets precedence, all other flags after it */
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+		region = CAM_SMMU_REGION_SHARED;
+	} else {
+		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
+			region = CAM_SMMU_REGION_IO;
+	}
+
+	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
+		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE))
+		rc = cam_mem_util_unmap_hw_va(idx, region, client);
+
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].flags = 0;
+	tbl.bufq[idx].buf_handle = -1;
+	tbl.bufq[idx].vaddr = 0;
+	memset(tbl.bufq[idx].hdls, 0,
+		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
+
+	CAM_DBG(CAM_MEM,
+		"Ion handle at idx = %d freeing = %pK, fd = %d, imported %d dma_buf %pK",
+		idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd,
+		tbl.bufq[idx].is_imported,
+		tbl.bufq[idx].dma_buf);
+
+	if (tbl.bufq[idx].i_hdl) {
+		ion_free(tbl.client, tbl.bufq[idx].i_hdl);
+		tbl.bufq[idx].i_hdl = NULL;
+	}
+
+	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].dma_buf = NULL;
+	tbl.bufq[idx].is_imported = false;
+	tbl.bufq[idx].len = 0;
+	tbl.bufq[idx].num_hdl = 0;
+	tbl.bufq[idx].active = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	mutex_destroy(&tbl.bufq[idx].q_lock);
+	clear_bit(idx, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
+
+	return rc;
+}
+
+/* Release a userspace-owned buffer identified by cmd->buf_handle.
+ * Validates the handle against the table, then unmaps via the user
+ * (fd-based) path.  Returns 0 on success, negative errno otherwise.
+ */
+int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
+{
+	int idx;
+	int rc;
+
+	if (!cmd) {
+		CAM_ERR(CAM_MEM, "Invalid argument");
+		return -EINVAL;
+	}
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
+		return -EINVAL;
+	}
+
+	if (!tbl.bufq[idx].active) {
+		CAM_ERR(CAM_MEM, "Released buffer state should be active");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
+		CAM_ERR(CAM_MEM,
+			"Released buf handle not matching within table");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_MEM, "Releasing hdl = %u", cmd->buf_handle);
+	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);
+
+	return rc;
+}
+
+/* Allocate a kernel-owned buffer: ION alloc, kernel CPU mapping, and a
+ * kernel SMMU mapping on inp->smmu_hdl.  Fills @out with the kva, iova,
+ * handle and length.  Release with cam_mem_mgr_release_mem().
+ *
+ * Returns 0 on success, negative errno otherwise; all partially
+ * acquired resources are rolled back via the goto cleanup chain.
+ */
+int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
+	struct cam_mem_mgr_memory_desc *out)
+{
+	struct ion_handle *hdl;
+	struct dma_buf *buf = NULL;
+	int ion_fd = -1;
+	int rc = 0;
+	uint32_t heap_id;
+	int32_t ion_flag = 0;
+	uintptr_t kvaddr;
+	dma_addr_t iova = 0;
+	size_t request_len = 0;
+	uint32_t mem_handle;
+	int32_t idx;
+	int32_t smmu_hdl = 0;
+	int32_t num_hdl = 0;
+
+	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
+
+	if (!inp || !out) {
+		CAM_ERR(CAM_MEM, "Invalid params");
+		return -EINVAL;
+	}
+
+	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
+		inp->flags & CAM_MEM_FLAG_CACHE)) {
+		CAM_ERR(CAM_MEM, "Invalid flags for request mem");
+		return -EINVAL;
+	}
+
+	if (inp->flags & CAM_MEM_FLAG_CACHE)
+		ion_flag |= ION_FLAG_CACHED;
+	else
+		ion_flag &= ~ION_FLAG_CACHED;
+
+	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
+		ION_HEAP(ION_CAMERA_HEAP_ID);
+
+	rc = cam_mem_util_get_dma_buf(inp->size,
+		inp->align,
+		heap_id,
+		ion_flag,
+		&hdl,
+		&buf);
+
+	if (rc) {
+		CAM_ERR(CAM_MEM, "ION alloc failed for shared buffer");
+		goto ion_fail;
+	} else {
+		CAM_DBG(CAM_MEM, "Got dma_buf = %pK, hdl = %pK", buf, hdl);
+	}
+
+	rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
+		goto map_fail;
+	}
+
+	if (!inp->smmu_hdl) {
+		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
+		rc = -EINVAL;
+		goto smmu_fail;
+	}
+
+	/* SHARED flag gets precedence, all other flags after it */
+	if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+		region = CAM_SMMU_REGION_SHARED;
+	} else {
+		if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
+			region = CAM_SMMU_REGION_IO;
+	}
+
+	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
+		buf,
+		CAM_SMMU_MAP_RW,
+		&iova,
+		&request_len,
+		region);
+
+	if (rc < 0) {
+		CAM_ERR(CAM_MEM, "SMMU mapping failed");
+		goto smmu_fail;
+	}
+
+	smmu_hdl = inp->smmu_hdl;
+	num_hdl = 1;
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto slot_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	/* ion_fd stays -1: kernel-owned buffers have no userspace fd */
+	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
+	tbl.bufq[idx].dma_buf = buf;
+	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].flags = inp->flags;
+	tbl.bufq[idx].buf_handle = mem_handle;
+	tbl.bufq[idx].kmdvaddr = kvaddr;
+
+	tbl.bufq[idx].vaddr = iova;
+
+	tbl.bufq[idx].i_hdl = hdl;
+	tbl.bufq[idx].len = inp->size;
+	tbl.bufq[idx].num_hdl = num_hdl;
+	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
+		sizeof(int32_t));
+	tbl.bufq[idx].is_imported = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	out->kva = kvaddr;
+	out->iova = (uint32_t)iova;
+	out->smmu_hdl = smmu_hdl;
+	out->mem_handle = mem_handle;
+	out->len = inp->size;
+	out->region = region;
+
+	return rc;
+slot_fail:
+	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
+		buf, region);
+smmu_fail:
+	ion_unmap_kernel(tbl.client, hdl);
+map_fail:
+	ion_free(tbl.client, hdl);
+ion_fail:
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_request_mem);
+
+/* Release a kernel-owned buffer obtained from cam_mem_mgr_request_mem().
+ * Returns 0 on success (including an already-released buffer), negative
+ * errno otherwise.
+ */
+int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
+{
+	int32_t idx;
+	int rc;
+
+	if (!inp) {
+		CAM_ERR(CAM_MEM, "Invalid argument");
+		return -EINVAL;
+	}
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
+		return -EINVAL;
+	}
+
+	if (!tbl.bufq[idx].active) {
+		/* Inactive with no IOVA means double-release: tolerate it */
+		if (tbl.bufq[idx].vaddr == 0) {
+			CAM_ERR(CAM_MEM, "buffer is released already");
+			return 0;
+		}
+		CAM_ERR(CAM_MEM, "Released buffer state should be active");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
+		CAM_ERR(CAM_MEM,
+			"Released buf handle not matching within table");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
+	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_release_mem);
+
+/* Allocate a buffer and reserve it as the secondary-heap region on
+ * inp->smmu_hdl.  Only CAM_SMMU_REGION_SECHEAP is supported.  No kernel
+ * CPU mapping is created (out->kva is 0).
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
+	enum cam_smmu_region_id region,
+	struct cam_mem_mgr_memory_desc *out)
+{
+	struct ion_handle *hdl;
+	struct dma_buf *buf = NULL;
+	int rc = 0;
+	int ion_fd = -1;
+	uint32_t heap_id;
+	dma_addr_t iova = 0;
+	size_t request_len = 0;
+	uint32_t mem_handle;
+	int32_t idx;
+	int32_t smmu_hdl = 0;
+	int32_t num_hdl = 0;
+
+	if (!inp || !out) {
+		CAM_ERR(CAM_MEM, "Invalid param(s)");
+		return -EINVAL;
+	}
+
+	if (!inp->smmu_hdl) {
+		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
+		return -EINVAL;
+	}
+
+	if (region != CAM_SMMU_REGION_SECHEAP) {
+		CAM_ERR(CAM_MEM, "Only secondary heap supported");
+		return -EINVAL;
+	}
+
+	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
+		ION_HEAP(ION_CAMERA_HEAP_ID);
+	rc = cam_mem_util_get_dma_buf(inp->size,
+		inp->align,
+		heap_id,
+		0,
+		&hdl,
+		&buf);
+
+	if (rc) {
+		CAM_ERR(CAM_MEM, "ION alloc failed for sec heap buffer");
+		goto ion_fail;
+	} else {
+		CAM_DBG(CAM_MEM, "Got dma_buf = %pK, hdl = %pK", buf, hdl);
+	}
+
+	rc = cam_smmu_reserve_sec_heap(inp->smmu_hdl,
+		buf,
+		&iova,
+		&request_len);
+
+	if (rc) {
+		CAM_ERR(CAM_MEM, "Reserving secondary heap failed");
+		goto smmu_fail;
+	}
+
+	smmu_hdl = inp->smmu_hdl;
+	num_hdl = 1;
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto slot_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	/* ion_fd stays -1: sec-heap buffers have no userspace fd */
+	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
+	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].dma_buf = buf;
+	tbl.bufq[idx].flags = inp->flags;
+	tbl.bufq[idx].buf_handle = mem_handle;
+	tbl.bufq[idx].kmdvaddr = 0;
+
+	tbl.bufq[idx].vaddr = iova;
+
+	tbl.bufq[idx].i_hdl = hdl;
+	tbl.bufq[idx].len = request_len;
+	tbl.bufq[idx].num_hdl = num_hdl;
+	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
+		sizeof(int32_t));
+	tbl.bufq[idx].is_imported = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	out->kva = 0;
+	out->iova = (uint32_t)iova;
+	out->smmu_hdl = smmu_hdl;
+	out->mem_handle = mem_handle;
+	out->len = request_len;
+	out->region = region;
+
+	return rc;
+
+slot_fail:
+	cam_smmu_release_sec_heap(smmu_hdl);
+smmu_fail:
+	ion_free(tbl.client, hdl);
+ion_fail:
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);
+
+/* Release the secondary-heap region reserved by
+ * cam_mem_mgr_reserve_memory_region() and free its backing buffer.
+ *
+ * Returns 0 on success (including an already-released buffer), negative
+ * errno otherwise.
+ */
+int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
+{
+	int32_t idx;
+	int rc;
+	int32_t smmu_hdl;
+
+	if (!inp) {
+		CAM_ERR(CAM_MEM, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (inp->region != CAM_SMMU_REGION_SECHEAP) {
+		CAM_ERR(CAM_MEM, "Only secondary heap supported");
+		return -EINVAL;
+	}
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
+		return -EINVAL;
+	}
+
+	if (!tbl.bufq[idx].active) {
+		/* Inactive with no IOVA means double-release: tolerate it */
+		if (tbl.bufq[idx].vaddr == 0) {
+			CAM_ERR(CAM_MEM, "buffer is released already");
+			return 0;
+		}
+		CAM_ERR(CAM_MEM, "Released buffer state should be active");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
+		CAM_ERR(CAM_MEM,
+			"Released buf handle not matching within table");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].num_hdl != 1) {
+		CAM_ERR(CAM_MEM,
+			"Sec heap region should have only one smmu hdl");
+		return -ENODEV;
+	}
+
+	memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
+		sizeof(int32_t));
+	if (inp->smmu_hdl != smmu_hdl) {
+		CAM_ERR(CAM_MEM,
+			"Passed SMMU handle doesn't match with internal hdl");
+		return -ENODEV;
+	}
+
+	rc = cam_smmu_release_sec_heap(inp->smmu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_MEM,
+			"Sec heap region release failed");
+		return -ENODEV;
+	}
+
+	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
+	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
+	if (rc)
+		CAM_ERR(CAM_MEM, "unmapping secondary heap failed");
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr.h
new file mode 100644
index 000000000000..92c366d723f9
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr.h
@@ -0,0 +1,131 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_MEM_MGR_H_
+#define _CAM_MEM_MGR_H_
+
+#include <linux/mutex.h>
+#include <linux/dma-buf.h>
+#include <media/cam_req_mgr.h>
+#include "cam_mem_mgr_api.h"
+
+#define CAM_MEM_BUFQ_MAX 1024
+
+/*Enum for possible SMMU operations */
+enum cam_smmu_mapping_client {
+ CAM_SMMU_MAPPING_USER,
+ CAM_SMMU_MAPPING_KERNEL,
+};
+
+/**
+ * struct cam_mem_buf_queue
+ *
+ * @i_hdl: ion handle for the buffer
+ * @dma_buf: pointer to the allocated dma_buf in the table
+ * @q_lock: mutex lock for buffer
+ * @hdls: list of mapped handles
+ * @num_hdl: number of handles
+ * @fd: file descriptor of buffer
+ * @buf_handle: unique handle for buffer
+ * @align: alignment for allocation
+ * @len: size of buffer
+ * @flags: attributes of buffer
+ * @vaddr: IOVA of buffer
+ * @kmdvaddr: Kernel virtual address
+ * @active: state of the buffer
+ * @is_imported: Flag indicating if buffer is imported from an FD in user space
+ */
+struct cam_mem_buf_queue {
+ struct ion_handle *i_hdl;
+ struct dma_buf *dma_buf;
+ struct mutex q_lock;
+ int32_t hdls[CAM_MEM_MMU_MAX_HANDLE];
+ int32_t num_hdl;
+ int32_t fd;
+ int32_t buf_handle;
+ int32_t align;
+ size_t len;
+ uint32_t flags;
+ uint64_t vaddr;
+ uintptr_t kmdvaddr;
+ bool active;
+ bool is_imported;
+};
+
+/**
+ * struct cam_mem_table
+ *
+ * @m_lock: mutex lock for table
+ * @bitmap: bitmap of the mem mgr utility
+ * @bits: max bits of the utility
+ * @client: ion client pointer
+ * @bufq: array of buffers
+ */
+struct cam_mem_table {
+ struct mutex m_lock;
+ void *bitmap;
+ size_t bits;
+ struct ion_client *client;
+ struct cam_mem_buf_queue bufq[CAM_MEM_BUFQ_MAX];
+};
+
+/**
+ * @brief: Allocates and maps buffer
+ *
+ * @cmd: Allocation information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd);
+
+/**
+ * @brief: Releases a buffer reference
+ *
+ * @cmd: Buffer release information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd);
+
+/**
+ * @brief Maps a buffer
+ *
+ * @cmd: Buffer mapping information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd);
+
+/**
+ * @brief: Perform cache ops on the buffer
+ *
+ * @cmd: Cache ops information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd);
+
+/**
+ * @brief: Initializes the memory manager
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_init(void);
+
+/**
+ * @brief: Tears down the memory manager
+ *
+ * @return None
+ */
+void cam_mem_mgr_deinit(void);
+
+#endif /* _CAM_MEM_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr_api.h
new file mode 100644
index 000000000000..64258e8fb5ee
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_mem_mgr_api.h
@@ -0,0 +1,125 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_MEM_MGR_API_H_
+#define _CAM_MEM_MGR_API_H_
+
+#include <media/cam_req_mgr.h>
+#include "cam_smmu_api.h"
+
+/**
+ * struct cam_mem_mgr_request_desc
+ *
+ * @size : Size of memory requested for allocation
+ * @align : Alignment of requested memory
+ * @smmu_hdl: SMMU handle to identify context bank where memory will be mapped
+ * @flags : Flags to indicate cached/uncached property
+ * @region : Region where memory should be allocated
+ */
+struct cam_mem_mgr_request_desc {
+ uint64_t size;
+ uint64_t align;
+ int32_t smmu_hdl;
+ uint32_t flags;
+};
+
+/**
+ * struct cam_mem_mgr_memory_desc
+ *
+ * @kva : Kernel virtual address of allocated memory
+ * @iova : IOVA of allocated memory
+ * @smmu_hdl : SMMU handle of allocated memory
+ * @mem_handle : Mem handle identifying allocated memory
+ * @len : Length of allocated memory
+ * @region : Region to which allocated memory belongs
+ */
+struct cam_mem_mgr_memory_desc {
+ uintptr_t kva;
+ uint32_t iova;
+ int32_t smmu_hdl;
+ uint32_t mem_handle;
+ uint64_t len;
+ enum cam_smmu_region_id region;
+};
+
+/**
+ * @brief: Requests a memory buffer
+ *
+ * @inp: Information specifying requested buffer properties
+ * @out: Information about allocated buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
+ struct cam_mem_mgr_memory_desc *out);
+
+/**
+ * @brief: Releases a memory buffer
+ *
+ * @inp: Information specifying buffer to be released
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp);
+
+/**
+ * @brief: Returns IOVA information about buffer
+ *
+ * @buf_handle: Handle of the buffer
+ * @mmu_handle: SMMU handle where buffer is mapped
+ * @iova_ptr : Pointer to mmu's iova
+ * @len_ptr : Length of the buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
+ dma_addr_t *iova_ptr, size_t *len_ptr);
+/**
+ * @brief: Returns CPU address information about buffer
+ *
+ * @buf_handle: Handle for the buffer
+ * @vaddr_ptr : pointer to kernel virtual address
+ * @len_ptr : Length of the buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr,
+ size_t *len);
+
+static inline bool cam_mem_is_secure_buf(int32_t buf_handle)
+{
+ return CAM_MEM_MGR_IS_SECURE_HDL(buf_handle);
+}
+
+/**
+ * @brief: Reserves a memory region
+ *
+ * @inp: Information specifying requested region properties
+ * @region : Region which is to be reserved
+ * @out : Information about reserved region
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
+ enum cam_smmu_region_id region,
+ struct cam_mem_mgr_memory_desc *out);
+
+/**
+ * @brief: Frees a memory region
+ *
+ * @inp : Information about region which is to be freed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp);
+
+#endif /* _CAM_MEM_MGR_API_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core.c
new file mode 100644
index 000000000000..275940f839e3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core.c
@@ -0,0 +1,2895 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "cam_req_mgr_interface.h"
+#include "cam_req_mgr_util.h"
+#include "cam_req_mgr_core.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_req_mgr_debug.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+#include "cam_req_mgr_dev.h"
+
+static struct cam_req_mgr_core_device *g_crm_core_dev;
+
+void cam_req_mgr_handle_core_shutdown(void)
+{
+ struct cam_req_mgr_core_session *session;
+ struct cam_req_mgr_core_session *tsession;
+ struct cam_req_mgr_session_info ses_info;
+
+ if (!list_empty(&g_crm_core_dev->session_head)) {
+ list_for_each_entry_safe(session, tsession,
+ &g_crm_core_dev->session_head, entry) {
+ ses_info.session_hdl =
+ session->session_hdl;
+ cam_req_mgr_destroy_session(&ses_info);
+ }
+ }
+}
+
+static int __cam_req_mgr_setup_payload(struct cam_req_mgr_core_workq *workq)
+{
+ int32_t i = 0;
+ int rc = 0;
+ struct crm_task_payload *task_data = NULL;
+
+ task_data = kcalloc(
+ workq->task.num_task, sizeof(*task_data),
+ GFP_KERNEL);
+ if (!task_data) {
+ rc = -ENOMEM;
+ } else {
+ for (i = 0; i < workq->task.num_task; i++)
+ workq->task.pool[i].payload = &task_data[i];
+ }
+
+ return rc;
+}
+
+/**
+ * __cam_req_mgr_find_pd_tbl()
+ *
+ * @brief : Find pipeline delay based table pointer which matches delay
+ * @tbl : Pointer to list of request table
+ * @delay : Pipeline delay value to be searched for comparison
+ *
+ * @return : pointer to request table for matching pipeline delay table.
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_find_pd_tbl(
+ struct cam_req_mgr_req_tbl *tbl, int32_t delay)
+{
+ if (!tbl)
+ return NULL;
+
+ do {
+ if (delay != tbl->pd)
+ tbl = tbl->next;
+ else
+ return tbl;
+ } while (tbl != NULL);
+
+ return NULL;
+}
+
+/**
+ * __cam_req_mgr_inc_idx()
+ *
+ * @brief : Increment val passed by step size and rollover after max_val
+ * @val : value to be incremented
+ * @step : amount/step by which val is incremented
+ * @max_val : max val after which idx will roll over
+ *
+ */
+static void __cam_req_mgr_inc_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+ *val = (*val + step) % max_val;
+}
+
+/**
+ * __cam_req_mgr_dec_idx()
+ *
+ * @brief : Decrement val passed by step size and rollover after max_val
+ * @val : value to be decremented
+ * @step : amount/step by which val is decremented
+ * @max_val : after zero value will roll over to max val
+ *
+ */
+static void __cam_req_mgr_dec_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+ *val = *val - step;
+ if (*val < 0)
+ *val = max_val + (*val);
+}
+
+/**
+ * __cam_req_mgr_validate_inject_delay()
+ *
+ * @brief : Check if any pd device is introducing inject delay
+ * @tbl : cam_req_mgr_req_tbl
+ * @curr_idx : slot idx
+ *
+ * @return : 0 for success, negative for failure
+ */
+static int __cam_req_mgr_validate_inject_delay(
+ struct cam_req_mgr_req_tbl *tbl,
+ int32_t curr_idx)
+{
+ struct cam_req_mgr_tbl_slot *slot = NULL;
+
+ while (tbl) {
+ slot = &tbl->slot[curr_idx];
+ if (slot->inject_delay > 0) {
+ slot->inject_delay--;
+ return -EAGAIN;
+ }
+ __cam_req_mgr_dec_idx(&curr_idx, tbl->pd_delta,
+ tbl->num_slots);
+ tbl = tbl->next;
+ }
+ return 0;
+}
+
+/**
+ * __cam_req_mgr_traverse()
+ *
+ * @brief : Traverse through pd tables, it will internally cover all linked
+ * pd tables. Each pd table visited will check if the idx passed to it is
+ * in ready state. If ready means all devices linked to the pd table
+ * have this request id packet ready. Then it calls subsequent pd
+ * tbl with new idx. New idx value takes into account the delta
+ * between current pd table and next one.
+ * @traverse_data: contains all the info to traverse through pd tables
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
+{
+ int rc = 0;
+ int32_t next_idx = traverse_data->idx;
+ int32_t curr_idx = traverse_data->idx;
+ struct cam_req_mgr_req_tbl *tbl;
+ struct cam_req_mgr_apply *apply_data;
+ struct cam_req_mgr_tbl_slot *slot = NULL;
+
+ if (!traverse_data->tbl || !traverse_data->apply_data) {
+ CAM_ERR(CAM_CRM, "NULL pointer %pK %pK",
+ traverse_data->tbl, traverse_data->apply_data);
+ traverse_data->result = 0;
+ return -EINVAL;
+ }
+
+ tbl = traverse_data->tbl;
+ apply_data = traverse_data->apply_data;
+ slot = &tbl->slot[curr_idx];
+ CAM_DBG(CAM_CRM,
+ "Enter pd %d idx %d state %d skip %d status %d skip_idx %d",
+ tbl->pd, curr_idx, tbl->slot[curr_idx].state,
+ tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status,
+ traverse_data->in_q->slot[curr_idx].skip_idx);
+
+ if ((traverse_data->self_link == true) &&
+ (!traverse_data->inject_delay_chk)) {
+ rc = __cam_req_mgr_validate_inject_delay(tbl, curr_idx);
+ if (rc) {
+ CAM_DBG(CAM_CRM, "Injecting Delay of one frame");
+ apply_data[tbl->pd].req_id = -1;
+ /* This pd tbl not ready to proceed with asked idx */
+ SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+ return -EAGAIN;
+ }
+ traverse_data->inject_delay_chk = true;
+ }
+
+ /* Check if req is ready or in skip mode or pd tbl is in skip mode */
+ if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY ||
+ traverse_data->in_q->slot[curr_idx].skip_idx == 1 ||
+ tbl->skip_traverse > 0) {
+ if (tbl->next) {
+ __cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta,
+ tbl->num_slots);
+ traverse_data->idx = next_idx;
+ traverse_data->tbl = tbl->next;
+ rc = __cam_req_mgr_traverse(traverse_data);
+ }
+ if (rc >= 0) {
+ SET_SUCCESS_BIT(traverse_data->result, tbl->pd);
+
+ if (traverse_data->validate_only == false) {
+ apply_data[tbl->pd].pd = tbl->pd;
+ apply_data[tbl->pd].req_id =
+ CRM_GET_REQ_ID(
+ traverse_data->in_q, curr_idx);
+ apply_data[tbl->pd].idx = curr_idx;
+
+ CAM_DBG(CAM_CRM, "req_id: %lld with pd of %d",
+ apply_data[tbl->pd].req_id,
+ apply_data[tbl->pd].pd);
+ /*
+ * If traverse is successful decrement
+ * traverse skip
+ */
+ if (tbl->skip_traverse > 0) {
+ apply_data[tbl->pd].req_id = -1;
+ tbl->skip_traverse--;
+ }
+ }
+ } else {
+ /* linked pd table is not ready for this traverse yet */
+ return rc;
+ }
+ } else {
+ /* This pd table is not ready to proceed with asked idx */
+ CAM_INFO(CAM_CRM,
+ "Skip Frame: req: %lld not ready pd: %d open_req count: %d",
+ CRM_GET_REQ_ID(traverse_data->in_q, curr_idx),
+ tbl->pd,
+ traverse_data->open_req_cnt);
+ SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+/**
+ * __cam_req_mgr_in_q_skip_idx()
+ *
+ * @brief : Mark the given input queue slot to be skipped during traverse
+ * @in_q : input queue pointer
+ * @idx : Sets skip_idx bit of the particular slot to true so when traverse
+ * happens for this idx, no req will be submitted for devices
+ * handling this idx.
+ *
+ */
+static void __cam_req_mgr_in_q_skip_idx(struct cam_req_mgr_req_queue *in_q,
+ int32_t idx)
+{
+ in_q->slot[idx].req_id = -1;
+ in_q->slot[idx].skip_idx = 1;
+ in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
+ CAM_DBG(CAM_CRM, "SET IDX SKIP on slot= %d", idx);
+}
+
+/**
+ * __cam_req_mgr_tbl_set_id()
+ *
+ * @brief : Set unique id to table
+ * @tbl : pipeline based table which requires new id
+ * @req : pointer to request data which contains num_tables counter
+ *
+ */
+static void __cam_req_mgr_tbl_set_id(struct cam_req_mgr_req_tbl *tbl,
+ struct cam_req_mgr_req_data *req)
+{
+ if (!tbl)
+ return;
+ do {
+ tbl->id = req->num_tbl++;
+ CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
+ tbl->id, tbl->pd, tbl->skip_traverse,
+ tbl->pd_delta);
+ tbl = tbl->next;
+ } while (tbl != NULL);
+}
+
+/**
+ * __cam_req_mgr_tbl_set_all_skip_cnt()
+ *
+ * @brief : Each pd table sets skip value based on delta between itself and
+ * max pd value. During initial streamon or bubble case this is
+ * used. That way each pd table skips required num of traverse and
+ * align themselves with req mgr connected devices.
+ * @l_tbl : iterates through list of pd tables and sets skip traverse
+ *
+ */
+static void __cam_req_mgr_tbl_set_all_skip_cnt(
+ struct cam_req_mgr_req_tbl **l_tbl)
+{
+ struct cam_req_mgr_req_tbl *tbl = *l_tbl;
+ int32_t max_pd;
+
+ if (!tbl)
+ return;
+
+ max_pd = tbl->pd;
+ do {
+ tbl->skip_traverse = max_pd - tbl->pd;
+ CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
+ tbl->id, tbl->pd, tbl->skip_traverse,
+ tbl->pd_delta);
+ tbl = tbl->next;
+ } while (tbl != NULL);
+}
+
+/**
+ * __cam_req_mgr_reset_req_slot()
+ *
+ * @brief : reset specified idx/slot in input queue as well as all pd tables
+ * @link : link pointer
+ * @idx : slot index which will be reset
+ *
+ */
+static void __cam_req_mgr_reset_req_slot(struct cam_req_mgr_core_link *link,
+ int32_t idx)
+{
+ struct cam_req_mgr_slot *slot;
+ struct cam_req_mgr_req_tbl *tbl = link->req.l_tbl;
+ struct cam_req_mgr_req_queue *in_q = link->req.in_q;
+
+ slot = &in_q->slot[idx];
+ CAM_DBG(CAM_CRM, "RESET: idx: %d: slot->status %d", idx, slot->status);
+
+ /* Check if CSL has already pushed new request*/
+ if (slot->status == CRM_SLOT_STATUS_REQ_ADDED)
+ return;
+
+ /* Reset input queue slot */
+ slot->req_id = -1;
+ slot->skip_idx = 0;
+ slot->recover = 0;
+ slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+ slot->status = CRM_SLOT_STATUS_NO_REQ;
+
+ /* Reset all pd table slot */
+ while (tbl != NULL) {
+ CAM_DBG(CAM_CRM, "pd: %d: idx %d state %d",
+ tbl->pd, idx, tbl->slot[idx].state);
+ tbl->slot[idx].req_ready_map = 0;
+ tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
+ tbl = tbl->next;
+ }
+}
+
+/**
+ * __cam_req_mgr_check_next_req_slot()
+ *
+ * @brief : While streaming if input queue does not contain any pending
+ * request, req mgr still needs to submit pending request ids to
+ * devices with lower pipeline delay value.
+ * @in_q : Pointer to input queue which req mgr will peek into
+ *
+ */
+static void __cam_req_mgr_check_next_req_slot(
+ struct cam_req_mgr_req_queue *in_q)
+{
+ int32_t idx = in_q->rd_idx;
+ struct cam_req_mgr_slot *slot;
+
+ __cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
+ slot = &in_q->slot[idx];
+
+ CAM_DBG(CAM_CRM, "idx: %d: slot->status %d", idx, slot->status);
+
+ /* Check if there is new req from CSL, if not complete req */
+ if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
+ __cam_req_mgr_in_q_skip_idx(in_q, idx);
+ if (in_q->wr_idx != idx)
+ CAM_WARN(CAM_CRM,
+ "CHECK here wr %d, rd %d", in_q->wr_idx, idx);
+ __cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
+ }
+}
+
+/**
+ * __cam_req_mgr_send_req()
+ *
+ * @brief : send request id to be applied to each device connected on link
+ * @link : pointer to link whose input queue and req tbl are
+ * traversed through
+ * @in_q : pointer to input request queue
+ *
+ * @return : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_req_queue *in_q, uint32_t trigger)
+{
+ int rc = 0, pd, i, idx;
+ struct cam_req_mgr_connected_device *dev = NULL;
+ struct cam_req_mgr_apply_request apply_req;
+ struct cam_req_mgr_link_evt_data evt_data;
+ struct cam_req_mgr_tbl_slot *slot = NULL;
+
+ apply_req.link_hdl = link->link_hdl;
+ apply_req.report_if_bubble = 0;
+
+ for (i = 0; i < link->num_devs; i++) {
+ dev = &link->l_dev[i];
+ if (!dev)
+ continue;
+ pd = dev->dev_info.p_delay;
+ if (pd >= CAM_PIPELINE_DELAY_MAX) {
+ CAM_WARN(CAM_CRM, "pd %d greater than max",
+ pd);
+ continue;
+ }
+
+ idx = link->req.apply_data[pd].idx;
+ slot = &dev->pd_tbl->slot[idx];
+ /*
+ * Just let flash go for this request and other
+ * device get restricted
+ */
+
+ if ((slot->skip_next_frame != true) ||
+ (slot->dev_hdl != dev->dev_hdl))
+ continue;
+
+ if (!(dev->dev_info.trigger & trigger))
+ continue;
+
+ apply_req.dev_hdl = dev->dev_hdl;
+ apply_req.request_id =
+ link->req.apply_data[pd].req_id;
+ apply_req.trigger_point = trigger;
+ if (dev->ops && dev->ops->apply_req) {
+ rc = dev->ops->apply_req(&apply_req);
+ if (rc)
+ return rc;
+ CAM_DBG(CAM_REQ,
+ "SEND: link_hdl: %x pd: %d req_id %lld",
+ link->link_hdl, pd, apply_req.request_id);
+ slot->skip_next_frame = false;
+ slot->is_applied = true;
+ return -EAGAIN;
+ }
+ }
+
+ for (i = 0; i < link->num_devs; i++) {
+ dev = &link->l_dev[i];
+ if (dev) {
+ pd = dev->dev_info.p_delay;
+ if (pd >= CAM_PIPELINE_DELAY_MAX) {
+ CAM_WARN(CAM_CRM, "pd %d greater than max",
+ pd);
+ continue;
+ }
+ if (link->req.apply_data[pd].skip_idx ||
+ link->req.apply_data[pd].req_id < 0) {
+ CAM_DBG(CAM_CRM, "skip %d req_id %lld",
+ link->req.apply_data[pd].skip_idx,
+ link->req.apply_data[pd].req_id);
+ continue;
+ }
+ if (!(dev->dev_info.trigger & trigger))
+ continue;
+
+ apply_req.dev_hdl = dev->dev_hdl;
+ apply_req.request_id =
+ link->req.apply_data[pd].req_id;
+ idx = link->req.apply_data[pd].idx;
+ slot = &dev->pd_tbl->slot[idx];
+ apply_req.report_if_bubble =
+ in_q->slot[idx].recover;
+
+ if ((slot->dev_hdl == dev->dev_hdl) &&
+ (slot->is_applied == true)) {
+ slot->is_applied = false;
+ continue;
+ }
+
+ trace_cam_req_mgr_apply_request(link, &apply_req, dev);
+
+ apply_req.trigger_point = trigger;
+ CAM_DBG(CAM_REQ,
+ "SEND: link_hdl: %x pd %d req_id %lld",
+ link->link_hdl, pd, apply_req.request_id);
+ if (dev->ops && dev->ops->apply_req) {
+ rc = dev->ops->apply_req(&apply_req);
+ if (rc < 0)
+ break;
+
+ if (pd == link->max_delay)
+ link->open_req_cnt--;
+ }
+ }
+ }
+ if (rc < 0) {
+ CAM_ERR_RATE_LIMIT(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
+ dev->dev_info.p_delay, apply_req.request_id);
+ /* Apply req failed notify already applied devs */
+ for (; i >= 0; i--) {
+ dev = &link->l_dev[i];
+ evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_ERR;
+ evt_data.dev_hdl = dev->dev_hdl;
+ evt_data.link_hdl = link->link_hdl;
+ evt_data.req_id = apply_req.request_id;
+ evt_data.u.error = CRM_KMD_ERR_BUBBLE;
+ if (dev->ops && dev->ops->process_evt)
+ dev->ops->process_evt(&evt_data);
+ }
+ }
+ return rc;
+}
+
+/**
+ * __cam_req_mgr_check_link_is_ready()
+ *
+ * @brief : traverse through all request tables and see if all devices are
+ * ready to apply request settings.
+ * @link : pointer to link whose input queue and req tbl are
+ * traversed through
+ * @idx : index within input request queue
+ * @validate_only : Whether to validate only and/or update settings
+ * @self_link : To indicate whether the validation is for the given link or
+ * other sync link
+ *
+ * @return : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
+ int32_t idx, bool validate_only, bool self_link)
+{
+ int rc;
+ struct cam_req_mgr_traverse traverse_data;
+ struct cam_req_mgr_req_queue *in_q;
+ struct cam_req_mgr_apply *apply_data;
+
+ in_q = link->req.in_q;
+
+ apply_data = link->req.apply_data;
+
+ if (validate_only == false) {
+ memset(apply_data, 0,
+ sizeof(struct cam_req_mgr_apply) * CAM_PIPELINE_DELAY_MAX);
+ }
+
+ traverse_data.apply_data = apply_data;
+ traverse_data.idx = idx;
+ traverse_data.tbl = link->req.l_tbl;
+ traverse_data.in_q = in_q;
+ traverse_data.result = 0;
+ traverse_data.validate_only = validate_only;
+ traverse_data.self_link = self_link;
+ traverse_data.inject_delay_chk = false;
+ traverse_data.open_req_cnt = link->open_req_cnt;
+ /*
+ * Traverse through all pd tables, if result is success,
+ * apply the settings
+ */
+
+ rc = __cam_req_mgr_traverse(&traverse_data);
+ CAM_DBG(CAM_CRM,
+ "SOF: idx %d self_link %d validate %d result %x pd_mask %x rc %d",
+ idx, traverse_data.self_link, traverse_data.validate_only,
+ traverse_data.result, link->pd_mask, rc);
+
+ if (!rc && traverse_data.result == link->pd_mask) {
+ CAM_DBG(CAM_CRM,
+ "READY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
+ link->link_hdl, idx,
+ apply_data[2].req_id,
+ apply_data[1].req_id,
+ apply_data[0].req_id);
+ } else
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+/**
+ * __cam_req_mgr_find_slot_for_req()
+ *
+ * @brief : Find idx from input queue at which req id is enqueued
+ * @in_q : input request queue pointer
+ * @req_id : request id which needs to be searched in input queue
+ *
+ * @return : slot index where passed request id is stored, -1 for failure
+ *
+ */
+static int32_t __cam_req_mgr_find_slot_for_req(
+ struct cam_req_mgr_req_queue *in_q, int64_t req_id)
+{
+ int32_t idx, i;
+ struct cam_req_mgr_slot *slot;
+
+ idx = in_q->rd_idx;
+ for (i = 0; i < in_q->num_slots; i++) {
+ slot = &in_q->slot[idx];
+ if (slot->req_id == req_id) {
+ CAM_DBG(CAM_CRM,
+ "req: %lld found at idx: %d status: %d sync_mode: %d",
+ req_id, idx, slot->status, slot->sync_mode);
+ break;
+ }
+ __cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots);
+ }
+ if (i >= in_q->num_slots)
+ idx = -1;
+
+ return idx;
+}
+
+/**
+ * __cam_req_mgr_reset_sof_cnt()
+ *
+ * @brief : the sof_count for both the links are reset
+ * @link : pointer to link whose input queue and req tbl are
+ * traversed through
+ *
+ */
+static void __cam_req_mgr_reset_sof_cnt(
+ struct cam_req_mgr_core_link *link)
+{
+ link->sof_counter = -1;
+ link->sync_link->sof_counter = -1;
+ link->frame_skip_flag = false;
+
+ CAM_DBG(CAM_CRM,
+ "link_hdl %x self_counter %lld other_counter %lld frame_skip_lag %d",
+ link->link_hdl, link->sof_counter,
+ link->sync_link->sof_counter, link->frame_skip_flag);
+}
+
+/**
+ * __cam_req_mgr_sof_cnt_initialize()
+ *
+ * @brief : when the sof count is initially -1 it increments count
+ * and computes the sync_self_ref for this link
+ * the count needs to be wrapped back starting from 0
+ * @link : pointer to link whose input queue and req tbl are
+ * traversed through
+ *
+ */
+static void __cam_req_mgr_sof_cnt_initialize(
+ struct cam_req_mgr_core_link *link)
+{
+ link->sof_counter++;
+ link->sync_self_ref = link->sof_counter -
+ link->sync_link->sof_counter;
+
+ CAM_DBG(CAM_CRM,
+ "link_hdl %x self_counter %lld other_counter %lld",
+ link->link_hdl, link->sof_counter,
+ link->sync_link->sof_counter);
+}
+
+/**
+ * __cam_req_mgr_wrap_sof_cnt()
+ *
+ * @brief : once the sof count reaches a predefined maximum
+ * the count needs to be wrapped back starting from 0
+ * @link : pointer to link whose input queue and req tbl are
+ * traversed through
+ *
+ */
+static void __cam_req_mgr_wrap_sof_cnt(
+ struct cam_req_mgr_core_link *link)
+{
+ link->sof_counter = (MAX_SYNC_COUNT -
+ (link->sync_link->sof_counter));
+ link->sync_link->sof_counter = 0;
+
+ CAM_DBG(CAM_CRM,
+ "link_hdl %x self_counter %lld sync_link_hdl %x other_counter %lld",
+ link->link_hdl, link->sof_counter,
+ link->sync_link->link_hdl, link->sync_link->sof_counter);
+}
+
+/**
+ * __cam_req_mgr_validate_sof_cnt()
+ *
+ * @brief : validates sof count difference for a given link
+ * @link : pointer to link whose input queue and req tbl are
+ * traversed through
+ * @sync_link : pointer to the sync link
+ * @return : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_validate_sof_cnt(
+ struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_core_link *sync_link)
+{
+ int64_t sync_diff = 0;
+ int rc = 0;
+
+ if (link->sof_counter == MAX_SYNC_COUNT)
+ __cam_req_mgr_wrap_sof_cnt(link);
+
+ sync_diff = link->sof_counter - sync_link->sof_counter;
+
+ CAM_DBG(CAM_CRM,
+ "link[%x] self_counter=%lld other_counter=%lld diff=%lld sync_self_ref=%lld",
+ link->link_hdl, link->sof_counter,
+ sync_link->sof_counter, sync_diff, link->sync_self_ref);
+
+ if (sync_diff > SYNC_LINK_SOF_CNT_MAX_LMT) {
+ link->sync_link->frame_skip_flag = true;
+ CAM_WARN(CAM_CRM,
+ "Detected anomaly, skip link_hdl %x self_counter=%lld other_counter=%lld sync_self_ref=%lld",
+ link->link_hdl, link->sof_counter,
+ sync_link->sof_counter, link->sync_self_ref);
+ rc = -EPERM;
+ }
+
+ return rc;
+}
+
+
+/**
+ * __cam_req_mgr_process_sync_req()
+ *
+ * @brief : processes requests during sync mode
+ * @link : pointer to link whose input queue and req tbl are
+ * traversed through
+ * @slot : pointer to the current slot being processed
+ * @return : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_process_sync_req(
+ struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_slot *slot)
+{
+ struct cam_req_mgr_core_link *sync_link = NULL;
+ int64_t req_id = 0;
+ int sync_slot_idx = 0, rc = 0;
+
+ if (!link->sync_link) {
+ CAM_ERR(CAM_CRM, "Sync link null");
+ return -EINVAL;
+ }
+
+ sync_link = link->sync_link;
+ req_id = slot->req_id;
+
+ CAM_DBG(CAM_REQ,
+ "link_hdl %x req %lld sync_self_ref %lld sof_counter %lld frame_skip_flag %d sync_link_self_ref %lld",
+ link->link_hdl, req_id, link->sync_self_ref, link->sof_counter,
+ link->frame_skip_flag, link->sync_link->sync_self_ref);
+
+ if (sync_link->sync_link_sof_skip) {
+ CAM_DBG(CAM_REQ,
+ "No req applied on corresponding SOF on sync link: %x",
+ sync_link->link_hdl);
+ sync_link->sync_link_sof_skip = false;
+ /*It is to manage compensate inject delay for each pd*/
+ __cam_req_mgr_check_link_is_ready(link, slot->idx, true, true);
+ return -EINVAL;
+ }
+
+ if (link->sof_counter == -1) {
+ __cam_req_mgr_sof_cnt_initialize(link);
+ } else if ((link->frame_skip_flag) &&
+ (sync_link->sync_self_ref != -1)) {
+ CAM_DBG(CAM_REQ, "Link[%x] Req[%lld] Resetting values ",
+ link->link_hdl, req_id);
+ __cam_req_mgr_reset_sof_cnt(link);
+ __cam_req_mgr_sof_cnt_initialize(link);
+ } else {
+ link->sof_counter++;
+ }
+
+ rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true, true);
+ if (rc) {
+ CAM_DBG(CAM_REQ,
+ "Req: %lld [My link] not ready on link: %x, rc=%d",
+ req_id, link->link_hdl, rc);
+ link->sync_link_sof_skip = true;
+ goto failure;
+ }
+
+ sync_slot_idx = __cam_req_mgr_find_slot_for_req(
+ sync_link->req.in_q, req_id);
+
+ if (sync_slot_idx != -1) {
+ rc = __cam_req_mgr_check_link_is_ready(
+ sync_link, sync_slot_idx, true, false);
+ CAM_DBG(CAM_CRM, "sync_slot_idx=%d, status=%d, rc=%d",
+ sync_slot_idx,
+ sync_link->req.in_q->slot[sync_slot_idx].status,
+ rc);
+ } else {
+ CAM_DBG(CAM_CRM, "sync_slot_idx=%d, rc=%d",
+ sync_slot_idx, rc);
+ }
+
+ if ((sync_slot_idx != -1) &&
+ ((sync_link->req.in_q->slot[sync_slot_idx].status ==
+ CRM_SLOT_STATUS_REQ_APPLIED) || (rc == 0))) {
+ rc = __cam_req_mgr_validate_sof_cnt(link, sync_link);
+ if (rc) {
+ CAM_DBG(CAM_CRM,
+ "Req: %lld validate failed: %x",
+ req_id, sync_link->link_hdl);
+ goto failure;
+ }
+
+ CAM_DBG(CAM_REQ,
+ "Req: %lld ready to apply on link: %x [validation successful]",
+ req_id, link->link_hdl);
+ /*
+ * At this point all validation is successfully done
+ * and we can proceed to apply the given request.
+ * Ideally the next call should return success.
+ */
+ rc = __cam_req_mgr_check_link_is_ready(link,
+ slot->idx, false, true);
+
+ if (rc)
+ CAM_WARN(CAM_CRM, "Unexpected return value rc: %d", rc);
+ } else {
+ CAM_DBG(CAM_REQ,
+ "Req: %lld [Other link] not ready to apply on link: %x",
+ req_id, sync_link->link_hdl);
+ rc = -EPERM;
+ link->sync_link_sof_skip = true;
+ goto failure;
+ }
+
+ return rc;
+
+failure:
+ link->sof_counter--;
+ return rc;
+}
+
+/**
+ * __cam_req_mgr_process_req()
+ *
+ * @brief : processes read index in request queue and traverse through table
+ * @link : pointer to link whose input queue and req tbl are
+ * traversed through
+ *
+ * @return : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
+ uint32_t trigger)
+{
+ int rc = 0, idx;
+ struct cam_req_mgr_slot *slot = NULL;
+ struct cam_req_mgr_req_queue *in_q;
+ struct cam_req_mgr_core_session *session;
+
+ in_q = link->req.in_q;
+ session = (struct cam_req_mgr_core_session *)link->parent;
+ mutex_lock(&session->lock);
+ /*
+ * Check if new read index,
+ * - if in pending state, traverse again to complete
+ * transaction of this read index.
+ * - if in applied_state, something is wrong.
+ * - if in no_req state, no new req
+ */
+ CAM_DBG(CAM_REQ, "SOF Req[%lld] idx %d req_status %d link_hdl %x",
+ in_q->slot[in_q->rd_idx].req_id, in_q->rd_idx,
+ in_q->slot[in_q->rd_idx].status, link->link_hdl);
+
+ slot = &in_q->slot[in_q->rd_idx];
+ if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
+ CAM_DBG(CAM_CRM, "No Pending req");
+ rc = 0;
+ goto error;
+ }
+
+ if ((trigger != CAM_TRIGGER_POINT_SOF) &&
+ (trigger != CAM_TRIGGER_POINT_EOF))
+ goto error;
+
+ if ((trigger == CAM_TRIGGER_POINT_EOF) &&
+ (!(link->trigger_mask & CAM_TRIGGER_POINT_SOF))) {
+ CAM_DBG(CAM_CRM, "Applying for last SOF fails");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (trigger == CAM_TRIGGER_POINT_SOF) {
+ if (link->trigger_mask) {
+ CAM_ERR_RATE_LIMIT(CAM_CRM,
+ "Applying for last EOF fails");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC)
+ rc = __cam_req_mgr_process_sync_req(link, slot);
+ else
+ rc = __cam_req_mgr_check_link_is_ready(link,
+ slot->idx, false, true);
+
+ if (rc < 0) {
+ /*
+ * If traverse result is not success, then some devices
+ * are not ready with packet for the asked request id,
+ * hence try again in next sof
+ */
+ slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+ spin_lock_bh(&link->link_state_spin_lock);
+ if (link->state == CAM_CRM_LINK_STATE_ERR) {
+ /*
+ * During error recovery all tables should be
+ * ready, don't expect to enter here.
+ * @TODO: gracefully handle if recovery fails.
+ */
+ CAM_ERR_RATE_LIMIT(CAM_CRM,
+ "FATAL recovery cant finish idx %d status %d",
+ in_q->rd_idx,
+ in_q->slot[in_q->rd_idx].status);
+ rc = -EPERM;
+ }
+ spin_unlock_bh(&link->link_state_spin_lock);
+ goto error;
+ }
+ }
+
+ rc = __cam_req_mgr_send_req(link, link->req.in_q, trigger);
+ if (rc < 0) {
+ /* Apply req failed retry at next sof */
+ slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+ } else {
+ link->trigger_mask |= trigger;
+
+ CAM_DBG(CAM_CRM, "Applied req[%lld] on link[%x] success",
+ slot->req_id, link->link_hdl);
+ spin_lock_bh(&link->link_state_spin_lock);
+ if (link->state == CAM_CRM_LINK_STATE_ERR) {
+ CAM_WARN(CAM_CRM, "Err recovery done idx %d",
+ in_q->rd_idx);
+ link->state = CAM_CRM_LINK_STATE_READY;
+ }
+ spin_unlock_bh(&link->link_state_spin_lock);
+
+ if (link->sync_link_sof_skip)
+ link->sync_link_sof_skip = false;
+
+ if (link->trigger_mask == link->subscribe_event) {
+ slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
+ link->trigger_mask = 0;
+ CAM_DBG(CAM_CRM, "req %d is applied on link %x",
+ slot->req_id,
+ link->link_hdl);
+ idx = in_q->rd_idx;
+ __cam_req_mgr_dec_idx(
+ &idx, link->max_delay + 1,
+ in_q->num_slots);
+ __cam_req_mgr_reset_req_slot(link, idx);
+ }
+ }
+
+ mutex_unlock(&session->lock);
+ return rc;
+error:
+ mutex_unlock(&session->lock);
+ return rc;
+}
+
+/**
+ * __cam_req_mgr_add_tbl_to_link()
+ *
+ * @brief : Add table to list under link sorted by pd in decrementing order
+ * @l_tbl : list of pipeline delay tables.
+ * @new_tbl : new tbl which will be appended to above list as per its pd value
+ *
+ */
+static void __cam_req_mgr_add_tbl_to_link(struct cam_req_mgr_req_tbl **l_tbl,
+ struct cam_req_mgr_req_tbl *new_tbl)
+{
+ struct cam_req_mgr_req_tbl *tbl;
+
+ if (!(*l_tbl) || (*l_tbl)->pd < new_tbl->pd) {
+ new_tbl->next = *l_tbl;
+ if (*l_tbl) {
+ new_tbl->pd_delta =
+ new_tbl->pd - (*l_tbl)->pd;
+ }
+ *l_tbl = new_tbl;
+ } else {
+ tbl = *l_tbl;
+
+ /* Reach existing tbl which has less pd value */
+ while (tbl->next != NULL &&
+ new_tbl->pd < tbl->next->pd) {
+ tbl = tbl->next;
+ }
+ if (tbl->next != NULL) {
+ new_tbl->pd_delta =
+ new_tbl->pd - tbl->next->pd;
+ } else {
+ /* This is last table in linked list*/
+ new_tbl->pd_delta = 0;
+ }
+ new_tbl->next = tbl->next;
+ tbl->next = new_tbl;
+ tbl->pd_delta = tbl->pd - new_tbl->pd;
+ }
+ CAM_DBG(CAM_CRM, "added pd %d tbl to link delta %d", new_tbl->pd,
+ new_tbl->pd_delta);
+}
+
+/**
+ * __cam_req_mgr_create_pd_tbl()
+ *
+ * @brief : Allocates a zero-initialized request table for a delay value
+ * @delay : New pd table allocated will have this delay value
+ *
+ * @return : pointer to newly allocated table, NULL for failure
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_create_pd_tbl(int32_t delay)
+{
+	struct cam_req_mgr_req_tbl *new_tbl;
+
+	new_tbl = kzalloc(sizeof(*new_tbl), GFP_KERNEL);
+	if (new_tbl == NULL)
+		return NULL;
+
+	new_tbl->num_slots = MAX_REQ_SLOTS;
+	CAM_DBG(CAM_CRM, "pd= %d slots= %d", delay, new_tbl->num_slots);
+
+	return new_tbl;
+}
+
+/**
+ * __cam_req_mgr_destroy_all_tbl()
+ *
+ * @brief : Frees every pipeline-delay request table on the list and
+ *          clears the caller's head pointer.
+ * @l_tbl : pointer to first table in list and it has max pd.
+ *
+ */
+static void __cam_req_mgr_destroy_all_tbl(struct cam_req_mgr_req_tbl **l_tbl)
+{
+	struct cam_req_mgr_req_tbl *cur, *next;
+
+	CAM_DBG(CAM_CRM, "*l_tbl %pK", *l_tbl);
+	/* Save the successor before freeing each node */
+	for (cur = *l_tbl; cur != NULL; cur = next) {
+		next = cur->next;
+		kfree(cur);
+	}
+	*l_tbl = NULL;
+}
+
+/**
+ * __cam_req_mgr_setup_in_q()
+ *
+ * @brief : Initializes the input queue: marks every slot empty and
+ *          rewinds the read/write indices.
+ * @req   : request data pointer
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_setup_in_q(struct cam_req_mgr_req_data *req)
+{
+	int slot_idx;
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+
+	if (!in_q) {
+		CAM_ERR(CAM_CRM, "NULL in_q");
+		return -EINVAL;
+	}
+
+	mutex_lock(&req->lock);
+	in_q->num_slots = MAX_REQ_SLOTS;
+
+	for (slot_idx = 0; slot_idx < in_q->num_slots; slot_idx++) {
+		struct cam_req_mgr_slot *slot = &in_q->slot[slot_idx];
+
+		slot->idx = slot_idx;
+		slot->req_id = -1;
+		slot->skip_idx = 0;
+		slot->status = CRM_SLOT_STATUS_NO_REQ;
+	}
+
+	in_q->wr_idx = 0;
+	in_q->rd_idx = 0;
+	mutex_unlock(&req->lock);
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_reset_in_q()
+ *
+ * @brief : Zeroes all input-queue slots and resets the queue to empty
+ * @req : request data pointer holding the input queue
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_reset_in_q(struct cam_req_mgr_req_data *req)
+{
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+
+	if (!in_q) {
+		CAM_ERR(CAM_CRM, "NULL in_q");
+		return -EINVAL;
+	}
+
+	mutex_lock(&req->lock);
+	/* Clear slot contents first, then mark the queue as holding none */
+	memset(in_q->slot, 0,
+		sizeof(struct cam_req_mgr_slot) * in_q->num_slots);
+	in_q->num_slots = 0;
+
+	in_q->wr_idx = 0;
+	in_q->rd_idx = 0;
+	mutex_unlock(&req->lock);
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_notify_sof_freeze()
+ *
+ * @brief : Sends a CAM_REQ_MGR_LINK_EVT_SOF_FREEZE event with fatal
+ *          error payload to every device connected on the link.
+ * @link  : link on which the sof freeze was detected
+ *
+ */
+static void __cam_req_mgr_notify_sof_freeze(
+	struct cam_req_mgr_core_link *link)
+{
+	int idx;
+	struct cam_req_mgr_connected_device *dev;
+	struct cam_req_mgr_link_evt_data evt_data;
+
+	for (idx = 0; idx < link->num_devs; idx++) {
+		dev = &link->l_dev[idx];
+		evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_SOF_FREEZE;
+		evt_data.dev_hdl = dev->dev_hdl;
+		evt_data.link_hdl = link->link_hdl;
+		evt_data.req_id = 0;
+		evt_data.u.error = CRM_KMD_ERR_FATAL;
+		/* Devices without a process_evt hook are skipped */
+		if (dev->ops && dev->ops->process_evt)
+			dev->ops->process_evt(&evt_data);
+	}
+}
+
+/**
+ * __cam_req_mgr_process_sof_freeze()
+ *
+ * @brief : Apoptosis - Handles case when connected devices are not responding.
+ *          Notifies all devices on the link and then raises a v4l error
+ *          event toward userspace.
+ * @priv : link information
+ * @data : task data
+ *
+ * @return : 0 on success, negative error code otherwise
+ */
+static int __cam_req_mgr_process_sof_freeze(void *priv, void *data)
+{
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_core_session *session = NULL;
+	struct cam_req_mgr_message msg;
+	int rc = 0;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	link = (struct cam_req_mgr_core_link *)priv;
+	session = (struct cam_req_mgr_core_session *)link->parent;
+
+	CAM_ERR(CAM_CRM, "SOF freeze for session %d link 0x%x",
+		session->session_hdl, link->link_hdl);
+
+	/* First notify kernel-side devices, then userspace via v4l event */
+	__cam_req_mgr_notify_sof_freeze(link);
+	memset(&msg, 0, sizeof(msg));
+
+	msg.session_hdl = session->session_hdl;
+	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_DEVICE;
+	msg.u.err_msg.request_id = 0;
+	msg.u.err_msg.link_hdl = link->link_hdl;
+
+	rc = cam_req_mgr_notify_message(&msg,
+		V4L_EVENT_CAM_REQ_MGR_ERROR, V4L_EVENT_CAM_REQ_MGR_EVENT);
+
+	if (rc)
+		CAM_ERR(CAM_CRM,
+			"Error notifying SOF freeze for session %d link 0x%x rc %d",
+			session->session_hdl, link->link_hdl, rc);
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_sof_freeze()
+ *
+ * @brief : Callback function for timer timeout indicating SOF freeze.
+ *          Runs in timer (softirq) context, so the actual handling is
+ *          deferred to the link workqueue.
+ * @data : timer pointer
+ *
+ */
+static void __cam_req_mgr_sof_freeze(unsigned long data)
+{
+	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+	struct crm_workq_task *task = NULL;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct crm_task_payload *task_data;
+
+	if (!timer) {
+		CAM_ERR(CAM_CRM, "NULL timer");
+		return;
+	}
+
+	/* timer->parent presumably holds the owning link — set at timer
+	 * creation; confirm against crm_timer init code.
+	 */
+	link = (struct cam_req_mgr_core_link *)timer->parent;
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CAM_ERR(CAM_CRM, "No empty task");
+		return;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_FREEZE;
+	task->process_cb = &__cam_req_mgr_process_sof_freeze;
+	cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+}
+
+/**
+ * __cam_req_mgr_create_subdevs()
+ *
+ * @brief : Create new crm subdev to link with realtime devices
+ * @l_dev : list of subdevs internal to crm; on success points at a
+ *          zero-initialized array of num_dev entries owned by the caller
+ * @num_dev : num of subdevs to be created for link
+ *
+ * @return : 0 on success, -ENOMEM when allocation fails
+ */
+static int __cam_req_mgr_create_subdevs(
+	struct cam_req_mgr_connected_device **l_dev, int32_t num_dev)
+{
+	/*
+	 * kcalloc() zero-initializes like kzalloc() but additionally
+	 * guards the num_dev * sizeof(elem) multiplication against
+	 * integer overflow; the redundant cast of the allocator's
+	 * void * return is dropped as well.
+	 */
+	*l_dev = kcalloc(num_dev, sizeof(**l_dev), GFP_KERNEL);
+	if (!*l_dev)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_destroy_subdev()
+ *
+ * @brief : Cleans up the subdevs allocated by crm for link
+ * @l_device : pointer to list of subdevs crm created; invalid after
+ *             this call, the caller must not reuse it
+ *
+ */
+static void __cam_req_mgr_destroy_subdev(
+	struct cam_req_mgr_connected_device *l_device)
+{
+	/*
+	 * The previous "l_device = NULL" after kfree() only cleared the
+	 * local by-value copy of the pointer and had no effect on the
+	 * caller; that dead store is removed.
+	 */
+	kfree(l_device);
+}
+
+/**
+ * __cam_req_mgr_disconnect_link()
+ *
+ * @brief : Unlinks all devices on the link via their link_setup ops
+ * @link : pointer to link
+ *
+ * @return : rc of the last device link_setup call (0 when none fail)
+ */
+static int __cam_req_mgr_disconnect_link(struct cam_req_mgr_core_link *link)
+{
+	int32_t i = 0;
+	struct cam_req_mgr_connected_device *dev;
+	struct cam_req_mgr_core_dev_link_setup link_data;
+	int rc = 0;
+
+	/* link_enable = 0 tells each device this is a teardown */
+	link_data.link_enable = 0;
+	link_data.link_hdl = link->link_hdl;
+	link_data.crm_cb = NULL;
+	link_data.subscribe_event = 0;
+
+	/* Using device ops unlink devices */
+	for (i = 0; i < link->num_devs; i++) {
+		/*
+		 * &link->l_dev[i] is the address of an array element and
+		 * can never be NULL; the unreachable NULL-check-and-
+		 * continue the original carried here is removed.
+		 */
+		dev = &link->l_dev[i];
+		link_data.dev_hdl = dev->dev_hdl;
+		if (dev->ops && dev->ops->link_setup) {
+			rc = dev->ops->link_setup(&link_data);
+			if (rc)
+				CAM_ERR(CAM_CRM,
+					"Unlink failed dev name %s hdl %x",
+					dev->dev_info.name,
+					dev->dev_hdl);
+		}
+		/* Sever the device's back-references to this link */
+		dev->dev_hdl = 0;
+		dev->parent = NULL;
+		dev->ops = NULL;
+	}
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_destroy_link_info()
+ *
+ * @brief : Cleans up the mem allocated while linking
+ * @link : pointer to link, mem associated with this link is freed
+ */
+static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
+{
+	/* Free pd tables, empty the input queue, then drop the req lock */
+	__cam_req_mgr_destroy_all_tbl(&link->req.l_tbl);
+	__cam_req_mgr_reset_in_q(&link->req);
+	link->req.num_tbl = 0;
+	mutex_destroy(&link->req.lock);
+
+	link->pd_mask = 0;
+	link->num_devs = 0;
+	link->max_delay = 0;
+}
+
+/**
+ * __cam_req_mgr_reserve_link()
+ *
+ * @brief: Allocates a link and its input queue, initializes both, and
+ *         registers the link in a free slot of the session's link array
+ * @session: session identifier
+ *
+ * @return: pointer to link reserved, NULL on failure
+ *
+ */
+static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
+	struct cam_req_mgr_core_session *session)
+{
+	struct cam_req_mgr_core_link *link;
+	struct cam_req_mgr_req_queue *in_q;
+	int i;
+
+	if (!session || !g_crm_core_dev) {
+		CAM_ERR(CAM_CRM, "NULL session/core_dev ptr");
+		return NULL;
+	}
+
+	if (session->num_links >= MAXIMUM_LINKS_PER_SESSION) {
+		CAM_ERR(CAM_CRM, "Reached max links %d per session limit %d",
+			session->num_links, MAXIMUM_LINKS_PER_SESSION);
+		return NULL;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		kzalloc(sizeof(struct cam_req_mgr_core_link), GFP_KERNEL);
+	if (!link) {
+		CAM_ERR(CAM_CRM, "failed to create link, no mem");
+		return NULL;
+	}
+	in_q = (struct cam_req_mgr_req_queue *)
+		kzalloc(sizeof(struct cam_req_mgr_req_queue), GFP_KERNEL);
+	if (!in_q) {
+		CAM_ERR(CAM_CRM, "failed to create input queue, no mem");
+		kfree(link);
+		return NULL;
+	}
+	mutex_init(&link->lock);
+	spin_lock_init(&link->link_state_spin_lock);
+
+	mutex_lock(&link->lock);
+	/* NOTE(review): state is set to AVAILABLE here and overwritten to
+	 * IDLE a few lines below; the first assignment looks redundant —
+	 * confirm no reader observes the intermediate value.
+	 */
+	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
+	link->num_devs = 0;
+	link->max_delay = 0;
+	memset(in_q->slot, 0,
+		sizeof(struct cam_req_mgr_slot) * MAX_REQ_SLOTS);
+	link->req.in_q = in_q;
+	in_q->num_slots = 0;
+	link->state = CAM_CRM_LINK_STATE_IDLE;
+	link->parent = (void *)session;
+	link->sync_link = NULL;
+	mutex_unlock(&link->lock);
+
+	mutex_lock(&session->lock);
+	/* Loop through and find a free index */
+	for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
+		if (!session->links[i]) {
+			CAM_DBG(CAM_CRM,
+				"Free link index %d found, num_links=%d",
+				i, session->num_links);
+			session->links[i] = link;
+			break;
+		}
+	}
+
+	if (i == MAXIMUM_LINKS_PER_SESSION) {
+		CAM_ERR(CAM_CRM, "Free link index not found");
+		goto error;
+	}
+
+	session->num_links++;
+	CAM_DBG(CAM_CRM, "Active session links (%d)",
+		session->num_links);
+	mutex_unlock(&session->lock);
+
+	return link;
+error:
+	/* Drop the session lock before freeing the partially set up link */
+	mutex_unlock(&session->lock);
+	kfree(link);
+	kfree(in_q);
+	return NULL;
+}
+
+/**
+ * __cam_req_mgr_free_link()
+ *
+ * @brief: Releases the link's input queue and then the link itself
+ * @link : link identifier
+ *
+ */
+static void __cam_req_mgr_free_link(struct cam_req_mgr_core_link *link)
+{
+	struct cam_req_mgr_req_queue *in_q = link->req.in_q;
+
+	link->req.in_q = NULL;
+	kfree(in_q);
+	kfree(link);
+}
+
+/**
+ * __cam_req_mgr_unreserve_link()
+ *
+ * @brief : Removes the link data struct from the session, tears down
+ *          any sync pairing, and frees the link
+ * @session: session identifier
+ * @link : link identifier
+ *
+ */
+static void __cam_req_mgr_unreserve_link(
+	struct cam_req_mgr_core_session *session,
+	struct cam_req_mgr_core_link *link)
+{
+	int i;
+
+	if (!session || !link) {
+		CAM_ERR(CAM_CRM, "NULL session/link ptr %pK %pK",
+			session, link);
+		return;
+	}
+
+	mutex_lock(&session->lock);
+	if (!session->num_links) {
+		CAM_WARN(CAM_CRM, "No active link or invalid state: hdl %x",
+			link->link_hdl);
+		mutex_unlock(&session->lock);
+		return;
+	}
+
+	/* A link occupies at most one slot; scanning all entries without
+	 * a break is harmless here.
+	 */
+	for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
+		if (session->links[i] == link)
+			session->links[i] = NULL;
+	}
+
+	if ((session->sync_mode != CAM_REQ_MGR_SYNC_MODE_NO_SYNC) &&
+		(link->sync_link)) {
+		/*
+		 * make sure to unlink sync setup under the assumption
+		 * of only having 2 links in a given session
+		 */
+		session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+		for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
+			if (session->links[i])
+				session->links[i]->sync_link = NULL;
+		}
+	}
+
+	session->num_links--;
+	CAM_DBG(CAM_CRM, "Active session links (%d)", session->num_links);
+	mutex_unlock(&session->lock);
+	/* Free outside the session lock; link is no longer reachable */
+	__cam_req_mgr_free_link(link);
+}
+
+/* Workqueue context processing section */
+
+/**
+ * cam_req_mgr_process_send_req()
+ *
+ * @brief: Runs in workqueue thread context. Forwards the queued request
+ *         to the link's devices with a SOF trigger point.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_send_req(void *priv, void *data)
+{
+	struct cam_req_mgr_core_link *link;
+	struct cam_req_mgr_send_request *send_req;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	link = (struct cam_req_mgr_core_link *)priv;
+	send_req = (struct cam_req_mgr_send_request *)data;
+
+	return __cam_req_mgr_send_req(link, send_req->in_q,
+		CAM_TRIGGER_POINT_SOF);
+}
+
+/**
+ * cam_req_mgr_process_flush_req()
+ *
+ * @brief: This runs in workqueue thread context. Call core funcs to check
+ *         which requests need to be removed/cancelled.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_flush_req(void *priv, void *data)
+{
+	int rc = 0, i = 0, idx = -1;
+	struct cam_req_mgr_flush_info *flush_info = NULL;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_req_queue *in_q = NULL;
+	struct cam_req_mgr_slot *slot = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_flush_request flush_req;
+	struct crm_task_payload *task_data = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	flush_info = (struct cam_req_mgr_flush_info *)&task_data->u;
+	CAM_DBG(CAM_REQ, "link_hdl %x req_id %lld type %d",
+		flush_info->link_hdl,
+		flush_info->req_id,
+		flush_info->flush_type);
+
+	in_q = link->req.in_q;
+
+	trace_cam_flush_req(link, flush_info);
+
+	mutex_lock(&link->req.lock);
+	if (flush_info->flush_type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		/* FLUSH_ALL: wipe every slot and rewind both indices */
+		for (i = 0; i < in_q->num_slots; i++) {
+			slot = &in_q->slot[i];
+			slot->req_id = -1;
+			slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+			slot->skip_idx = 1;
+			slot->status = CRM_SLOT_STATUS_NO_REQ;
+		}
+		in_q->wr_idx = 0;
+		in_q->rd_idx = 0;
+	} else if (flush_info->flush_type ==
+		CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+		/* CANCEL_REQ: only requests not yet pending/applied can go.
+		 * NOTE(review): these early-error returns skip the
+		 * complete(&link->workq_comp) below — verify no caller
+		 * blocks on workq_comp across a failed cancel.
+		 */
+		idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id);
+		if (idx < 0) {
+			CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
+				flush_info->req_id);
+			mutex_unlock(&link->req.lock);
+			return -EINVAL;
+		}
+		CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
+			flush_info->req_id, idx);
+		slot = &in_q->slot[idx];
+		if (slot->status == CRM_SLOT_STATUS_REQ_PENDING ||
+			slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
+			CAM_WARN(CAM_CRM,
+				"req_id %lld can not be cancelled",
+				flush_info->req_id);
+			mutex_unlock(&link->req.lock);
+			return -EINVAL;
+		}
+		__cam_req_mgr_in_q_skip_idx(in_q, idx);
+	}
+
+	/* Propagate the flush to every connected device */
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+		flush_req.link_hdl = flush_info->link_hdl;
+		flush_req.dev_hdl = device->dev_hdl;
+		flush_req.req_id = flush_info->req_id;
+		flush_req.type = flush_info->flush_type;
+		/* @TODO: error return handling from drivers */
+		if (device->ops && device->ops->flush_req)
+			rc = device->ops->flush_req(&flush_req);
+	}
+	/* Wake the thread waiting for the flush to finish */
+	complete(&link->workq_comp);
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_sched_req()
+ *
+ * @brief: This runs in workqueue thread context. Inserts a newly
+ *         scheduled request into the input queue's write slot.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_sched_req(void *priv, void *data)
+{
+	int rc = 0;
+	struct cam_req_mgr_sched_request *sched_req = NULL;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_req_queue *in_q = NULL;
+	struct cam_req_mgr_slot *slot = NULL;
+	struct crm_task_payload *task_data = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	sched_req = (struct cam_req_mgr_sched_request *)&task_data->u;
+	in_q = link->req.in_q;
+
+	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld at slot %d sync_mode %d",
+		sched_req->link_hdl, sched_req->req_id,
+		in_q->wr_idx, sched_req->sync_mode);
+
+	mutex_lock(&link->req.lock);
+	slot = &in_q->slot[in_q->wr_idx];
+
+	/* Overwriting an in-flight slot means the queue wrapped too fast */
+	if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
+		slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
+		CAM_WARN(CAM_CRM, "in_q overwrite %d", slot->status);
+
+	slot->status = CRM_SLOT_STATUS_REQ_ADDED;
+	slot->req_id = sched_req->req_id;
+	slot->sync_mode = sched_req->sync_mode;
+	slot->skip_idx = 0;
+	slot->recover = sched_req->bubble_enable;
+	link->open_req_cnt++;
+	__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_add_req()
+ *
+ * @brief: This runs in workqueue thread context. Records a device's
+ *         readiness for a request in that device's pd table slot and
+ *         marks the slot READY once all devices have reported.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_add_req(void *priv, void *data)
+{
+	int rc = 0, i = 0, idx;
+	struct cam_req_mgr_add_request *add_req = NULL;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_req_tbl *tbl = NULL;
+	struct cam_req_mgr_tbl_slot *slot = NULL;
+	struct crm_task_payload *task_data = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	add_req = (struct cam_req_mgr_add_request *)&task_data->u;
+
+	/* Locate the pd table of the device reporting this request */
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+		if (device->dev_hdl == add_req->dev_hdl) {
+			tbl = device->pd_tbl;
+			break;
+		}
+	}
+	if (!tbl) {
+		/*
+		 * Fix: the previous log unconditionally read
+		 * l_dev[0].dev_hdl and l_dev[1].dev_hdl; with fewer than
+		 * two connected devices that is an out-of-bounds read of
+		 * the l_dev array. Log the searched handle and device
+		 * count instead.
+		 */
+		CAM_ERR_RATE_LIMIT(CAM_CRM,
+			"dev_hdl not found %x, num_devs %d",
+			add_req->dev_hdl, link->num_devs);
+		rc = -EINVAL;
+		goto end;
+	}
+	/*
+	 * Go through request table and add
+	 * request id to proper table
+	 * 1. find req slot in in_q matching req_id.sent by dev
+	 * 2. goto table of this device based on p_delay
+	 * 3. mark req_ready_map with this dev_bit.
+	 */
+
+	mutex_lock(&link->req.lock);
+	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
+	if (idx < 0) {
+		CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
+		rc = -EBADSLT;
+		mutex_unlock(&link->req.lock);
+		goto end;
+	}
+
+	slot = &tbl->slot[idx];
+	slot->is_applied = false;
+	/* Honor the largest injected delay requested for this slot */
+	if ((add_req->skip_before_applying & 0xFF) > slot->inject_delay) {
+		slot->inject_delay = (add_req->skip_before_applying & 0xFF);
+		slot->dev_hdl = add_req->dev_hdl;
+		if (add_req->skip_before_applying & SKIP_NEXT_FRAME)
+			slot->skip_next_frame = true;
+		CAM_DBG(CAM_CRM, "Req_id %llu injecting delay %llu",
+			add_req->req_id,
+			(add_req->skip_before_applying & 0xFF));
+	}
+
+	if (slot->state != CRM_REQ_STATE_PENDING &&
+		slot->state != CRM_REQ_STATE_EMPTY) {
+		CAM_WARN(CAM_CRM, "Unexpected state %d for slot %d map %x",
+			slot->state, idx, slot->req_ready_map);
+	}
+
+	slot->state = CRM_REQ_STATE_PENDING;
+	/* device is the entry matched in the lookup loop above */
+	slot->req_ready_map |= (1 << device->dev_bit);
+
+	CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
+		idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
+		slot->req_ready_map);
+
+	trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device);
+
+	/* Slot is READY once every device on this pd table has reported */
+	if (slot->req_ready_map == tbl->dev_mask) {
+		CAM_DBG(CAM_REQ, "idx %d req_id %lld pd %d SLOT READY",
+			idx, add_req->req_id, tbl->pd);
+		slot->state = CRM_REQ_STATE_READY;
+	}
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_error()
+ *
+ * @brief: This runs in workqueue thread context. bubble /err recovery:
+ *        rewinds the input queue to the bubbled request, notifies all
+ *        devices, and moves the link into ERR state.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_error(void *priv, void *data)
+{
+	int rc = 0, idx = -1, i;
+	struct cam_req_mgr_error_notify *err_info = NULL;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_req_queue *in_q = NULL;
+	struct cam_req_mgr_slot *slot = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_link_evt_data evt_data;
+	struct crm_task_payload *task_data = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	err_info = (struct cam_req_mgr_error_notify *)&task_data->u;
+	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld error %d",
+		err_info->link_hdl,
+		err_info->req_id,
+		err_info->error);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	if (err_info->error == CRM_KMD_ERR_BUBBLE) {
+		idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
+		if (idx < 0) {
+			CAM_ERR_RATE_LIMIT(CAM_CRM,
+				"req_id %lld not found in input queue",
+				err_info->req_id);
+		} else {
+			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
+				err_info->req_id, idx);
+			slot = &in_q->slot[idx];
+			if (!slot->recover) {
+				/* Recovery was not requested at schedule time */
+				CAM_WARN(CAM_CRM,
+					"err recovery disabled req_id %lld",
+					err_info->req_id);
+				mutex_unlock(&link->req.lock);
+				return 0;
+			} else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING
+			&& slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
+				CAM_WARN(CAM_CRM,
+					"req_id %lld can not be recovered %d",
+					err_info->req_id, slot->status);
+				mutex_unlock(&link->req.lock);
+				return -EINVAL;
+			}
+			/* Notify all devices in the link about error.
+			 * NOTE(review): &link->l_dev[i] is never NULL, so
+			 * the check below is always true.
+			 */
+			for (i = 0; i < link->num_devs; i++) {
+				device = &link->l_dev[i];
+				if (device != NULL) {
+					evt_data.dev_hdl = device->dev_hdl;
+					evt_data.evt_type =
+						CAM_REQ_MGR_LINK_EVT_ERR;
+					evt_data.link_hdl =  link->link_hdl;
+					evt_data.req_id = err_info->req_id;
+					evt_data.u.error = err_info->error;
+					if (device->ops &&
+						device->ops->process_evt)
+						rc = device->ops->process_evt(
+							&evt_data);
+				}
+			}
+			/* Bring processing pointer to bubbled req id */
+			__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
+			in_q->rd_idx = idx;
+			in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
+			spin_lock_bh(&link->link_state_spin_lock);
+			link->state = CAM_CRM_LINK_STATE_ERR;
+			spin_unlock_bh(&link->link_state_spin_lock);
+		}
+	}
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_trigger()
+ *
+ * @brief: This runs in workqueue thread context. Advances the input
+ *         queue past applied slots and tries to apply the next
+ *         pending request for this trigger.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+static int cam_req_mgr_process_trigger(void *priv, void *data)
+{
+	int rc = 0;
+	struct cam_req_mgr_trigger_notify *trigger_data = NULL;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_req_queue *in_q = NULL;
+	struct crm_task_payload *task_data = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	trigger_data = (struct cam_req_mgr_trigger_notify *)&task_data->u;
+
+	CAM_DBG(CAM_REQ, "link_hdl %x frame_id %lld, trigger %x\n",
+		trigger_data->link_hdl,
+		trigger_data->frame_id,
+		trigger_data->trigger);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	/*
+	 * Check if current read index is in applied state, if yes make it free
+	 * and increment read index to next slot.
+	 */
+	CAM_DBG(CAM_CRM, "link_hdl %x current idx %d req_status %d",
+		link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
+
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state == CAM_CRM_LINK_STATE_ERR)
+		CAM_WARN(CAM_CRM, "Error recovery idx %d status %d",
+			in_q->rd_idx,
+			in_q->slot[in_q->rd_idx].status);
+
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	if (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED) {
+		/*
+		 * Do NOT reset req q slot data here, it can not be done
+		 * here because we need to preserve the data to handle bubble.
+		 *
+		 * Check if any new req is pending in slot, if not finish the
+		 * lower pipeline delay device with available req ids.
+		 */
+		CAM_DBG(CAM_CRM, "link[%x] Req[%lld] invalidating slot",
+			link->link_hdl, in_q->slot[in_q->rd_idx].req_id);
+		__cam_req_mgr_check_next_req_slot(in_q);
+		__cam_req_mgr_inc_idx(&in_q->rd_idx, 1, in_q->num_slots);
+	}
+	rc = __cam_req_mgr_process_req(link, trigger_data->trigger);
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_dev_handle_to_name()
+ *
+ * @brief : Finds device name based on the device handle
+ * @dev_hdl : Device handle whose name is to be found
+ * @link : Link on which the device is connected
+ * @return : String containing the device name, or a placeholder when
+ *           the handle matches no connected device
+ *
+ */
+static const char *__cam_req_mgr_dev_handle_to_name(
+	int32_t dev_hdl, struct cam_req_mgr_core_link *link)
+{
+	int idx;
+
+	for (idx = 0; idx < link->num_devs; idx++) {
+		struct cam_req_mgr_connected_device *dev = &link->l_dev[idx];
+
+		if (dev->dev_hdl == dev_hdl)
+			return dev->dev_info.name;
+	}
+
+	return "Invalid dev_hdl";
+}
+
+/* Linked devices' Callback section */
+
+/**
+ * cam_req_mgr_cb_add_req()
+ *
+ * @brief : Drivers call this function to notify new packet is available.
+ *          Validates link state and request id, then defers the actual
+ *          bookkeeping to the link workqueue.
+ * @add_req : Information about new request available at a device.
+ *
+ * @return : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
+{
+	int rc = 0, idx;
+	struct crm_workq_task *task = NULL;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_add_request *dev_req;
+	struct crm_task_payload *task_data;
+
+	if (!add_req) {
+		/* NOTE(review): message says "sof_data" but the checked
+		 * argument is add_req — looks like a copy/paste leftover.
+		 */
+		CAM_ERR(CAM_CRM, "sof_data is NULL");
+		return -EINVAL;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(add_req->link_hdl);
+
+	if (!link) {
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_REQ, "dev name %s dev_hdl %d dev req %lld",
+		__cam_req_mgr_dev_handle_to_name(add_req->dev_hdl, link),
+		add_req->dev_hdl, add_req->req_id);
+
+	/* link->lock is held for the rest of the function; every exit
+	 * below goes through "end" which releases it.
+	 */
+	mutex_lock(&link->lock);
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state < CAM_CRM_LINK_STATE_READY) {
+		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+		rc = -EPERM;
+		spin_unlock_bh(&link->link_state_spin_lock);
+		goto end;
+	}
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	/* Validate if req id is present in input queue */
+	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
+	if (idx < 0) {
+		CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
+		rc = -ENOENT;
+		goto end;
+	}
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "no empty task dev %x req %lld",
+			add_req->dev_hdl, add_req->req_id);
+		rc = -EBUSY;
+		goto end;
+	}
+
+	/* Copy the request into the task payload and hand off to workq */
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_DEV_ADD_REQ;
+	dev_req = (struct cam_req_mgr_add_request *)&task_data->u;
+	dev_req->req_id = add_req->req_id;
+	dev_req->link_hdl = add_req->link_hdl;
+	dev_req->dev_hdl = add_req->dev_hdl;
+	dev_req->skip_before_applying = add_req->skip_before_applying;
+	task->process_cb = &cam_req_mgr_process_add_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+	CAM_DBG(CAM_CRM, "X: dev %x dev req %lld",
+		add_req->dev_hdl, add_req->req_id);
+
+end:
+	mutex_unlock(&link->lock);
+	return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_err()
+ *
+ * @brief : Error received from device, sends bubble recovery request
+ *          to the link workqueue
+ * @err_info : contains information about error occurred like bubble/overflow
+ *
+ * @return : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_notify_err(
+	struct cam_req_mgr_error_notify *err_info)
+{
+	int rc = 0;
+	struct crm_workq_task *task = NULL;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_error_notify *notify_err;
+	struct crm_task_payload *task_data;
+
+	if (!err_info) {
+		CAM_ERR(CAM_CRM, "err_info is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(err_info->link_hdl);
+	if (!link) {
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", err_info->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Only an exact READY state accepts error notifications here */
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state != CAM_CRM_LINK_STATE_READY) {
+		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+		spin_unlock_bh(&link->link_state_spin_lock);
+		rc = -EPERM;
+		goto end;
+	}
+	/* Device is alive — re-arm the SOF freeze watchdog */
+	crm_timer_reset(link->watchdog);
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CAM_ERR(CAM_CRM, "no empty task req_id %lld", err_info->req_id);
+		rc = -EBUSY;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR;
+	notify_err = (struct cam_req_mgr_error_notify *)&task_data->u;
+	notify_err->req_id = err_info->req_id;
+	notify_err->link_hdl = err_info->link_hdl;
+	notify_err->dev_hdl = err_info->dev_hdl;
+	notify_err->error = err_info->error;
+	task->process_cb = &cam_req_mgr_process_error;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_trigger()
+ *
+ * @brief : SOF received from device, sends trigger through workqueue
+ * @trigger_data : contains information about frame_id, link etc.
+ *
+ * @return : 0 on success
+ *
+ */
+static int cam_req_mgr_cb_notify_trigger(
+	struct cam_req_mgr_trigger_notify *trigger_data)
+{
+	int rc = 0;
+	struct crm_workq_task *task = NULL;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_trigger_notify *notify_trigger;
+	struct crm_task_payload *task_data;
+
+	if (!trigger_data) {
+		CAM_ERR(CAM_CRM, "sof_data is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(trigger_data->link_hdl);
+	if (!link) {
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", trigger_data->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* READY or ERR (recovery) states accept triggers; lower states don't */
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state < CAM_CRM_LINK_STATE_READY) {
+		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+		spin_unlock_bh(&link->link_state_spin_lock);
+		rc = -EPERM;
+		goto end;
+	}
+	/* SOF arrived — re-arm the freeze watchdog */
+	crm_timer_reset(link->watchdog);
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CAM_ERR(CAM_CRM, "no empty task frame %lld",
+			trigger_data->frame_id);
+		rc = -EBUSY;
+		goto end;
+	}
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_SOF;
+	notify_trigger = (struct cam_req_mgr_trigger_notify *)&task_data->u;
+	notify_trigger->frame_id = trigger_data->frame_id;
+	notify_trigger->link_hdl = trigger_data->link_hdl;
+	notify_trigger->dev_hdl = trigger_data->dev_hdl;
+	notify_trigger->trigger = trigger_data->trigger;
+	task->process_cb = &cam_req_mgr_process_trigger;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+end:
+	return rc;
+}
+
+/* CRM callback table handed to every device during link setup */
+static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
+	.notify_trigger = cam_req_mgr_cb_notify_trigger,
+	.notify_err = cam_req_mgr_cb_notify_err,
+	.add_req = cam_req_mgr_cb_add_req,
+};
+
+/**
+ * __cam_req_mgr_setup_link_info()
+ *
+ * @brief     : Sets up input queue, create pd based tables, communicate with
+ *              devs connected on this link and setup communication.
+ * @link      : pointer to link to setup
+ * @link_info : link_info coming from CSL to prepare link
+ *
+ * @return    : 0 on success, negative in case of failure
+ *
+ */
+static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_link_info *link_info)
+{
+	int rc = 0, i = 0;
+	struct cam_req_mgr_core_dev_link_setup link_data;
+	struct cam_req_mgr_connected_device *dev;
+	struct cam_req_mgr_req_tbl *pd_tbl;
+	enum cam_pipeline_delay max_delay;
+	uint32_t subscribe_event = 0;
+
+	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES)
+		return -EPERM;
+
+	mutex_init(&link->req.lock);
+	CAM_DBG(CAM_CRM, "LOCK_DBG in_q lock %pK", &link->req.lock);
+	link->req.num_tbl = 0;
+
+	rc = __cam_req_mgr_setup_in_q(&link->req);
+	if (rc < 0)
+		return rc;
+
+	max_delay = CAM_PIPELINE_DELAY_0;
+	for (i = 0; i < link_info->num_devices; i++) {
+		dev = &link->l_dev[i];
+		/* Using dev hdl, get ops ptr to communicate with device */
+		dev->ops = (struct cam_req_mgr_kmd_ops *)
+			cam_get_device_ops(link_info->dev_hdls[i]);
+		if (!dev->ops ||
+			!dev->ops->get_dev_info ||
+			!dev->ops->link_setup) {
+			CAM_ERR(CAM_CRM, "FATAL: device ops NULL");
+			rc = -ENXIO;
+			goto error;
+		}
+		dev->dev_hdl = link_info->dev_hdls[i];
+		dev->parent = (void *)link;
+		dev->dev_info.dev_hdl = dev->dev_hdl;
+		rc = dev->ops->get_dev_info(&dev->dev_info);
+
+		trace_cam_req_mgr_connect_device(link, &dev->dev_info);
+
+		CAM_DBG(CAM_CRM,
+			"%x: connected: %s, id %d, delay %d, trigger %x",
+			link_info->session_hdl, dev->dev_info.name,
+			dev->dev_info.dev_id, dev->dev_info.p_delay,
+			dev->dev_info.trigger);
+		if (rc < 0 ||
+			dev->dev_info.p_delay >=
+			CAM_PIPELINE_DELAY_MAX ||
+			dev->dev_info.p_delay <
+			CAM_PIPELINE_DELAY_0) {
+			CAM_ERR(CAM_CRM, "get device info failed");
+			/*
+			 * get_dev_info() may have returned 0 while reporting
+			 * an out-of-range pipeline delay; make sure a
+			 * negative rc is propagated so the caller does not
+			 * treat this torn-down link as a success.
+			 */
+			if (rc >= 0)
+				rc = -ENXIO;
+			goto error;
+		} else {
+			CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d",
+				link_info->session_hdl,
+				dev->dev_info.name,
+				dev->dev_info.p_delay);
+			if (dev->dev_info.p_delay > max_delay)
+				max_delay = dev->dev_info.p_delay;
+
+			subscribe_event |= (uint32_t)dev->dev_info.trigger;
+		}
+	}
+
+	link->subscribe_event = subscribe_event;
+	link_data.link_enable = 1;
+	link_data.link_hdl = link->link_hdl;
+	link_data.crm_cb = &cam_req_mgr_ops;
+	link_data.max_delay = max_delay;
+	link_data.subscribe_event = subscribe_event;
+
+	for (i = 0; i < link_info->num_devices; i++) {
+		dev = &link->l_dev[i];
+
+		link_data.dev_hdl = dev->dev_hdl;
+		/*
+		 * For unique pipeline delay table create request
+		 * tracking table
+		 */
+		if (link->pd_mask & (1 << dev->dev_info.p_delay)) {
+			pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl,
+				dev->dev_info.p_delay);
+			if (!pd_tbl) {
+				CAM_ERR(CAM_CRM, "pd %d tbl not found",
+					dev->dev_info.p_delay);
+				rc = -ENXIO;
+				goto error;
+			}
+		} else {
+			pd_tbl = __cam_req_mgr_create_pd_tbl(
+				dev->dev_info.p_delay);
+			if (pd_tbl == NULL) {
+				CAM_ERR(CAM_CRM, "create new pd tbl failed");
+				rc = -ENXIO;
+				goto error;
+			}
+			pd_tbl->pd = dev->dev_info.p_delay;
+			link->pd_mask |= (1 << pd_tbl->pd);
+			/*
+			 * Add table to list and also sort list
+			 * from max pd to lowest
+			 */
+			__cam_req_mgr_add_tbl_to_link(&link->req.l_tbl, pd_tbl);
+		}
+		dev->dev_bit = pd_tbl->dev_count++;
+		dev->pd_tbl = pd_tbl;
+		pd_tbl->dev_mask |= (1 << dev->dev_bit);
+
+		/* Communicate with dev to establish the link */
+		dev->ops->link_setup(&link_data);
+
+		if (link->max_delay < dev->dev_info.p_delay)
+			link->max_delay = dev->dev_info.p_delay;
+	}
+	link->num_devs = link_info->num_devices;
+
+	/* Assign id for pd tables */
+	__cam_req_mgr_tbl_set_id(link->req.l_tbl, &link->req);
+
+	/* At start, expect max pd devices, all are in skip state */
+	__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
+
+	return 0;
+
+error:
+	__cam_req_mgr_destroy_link_info(link);
+	return rc;
+}
+
+/* IOCTLs handling section */
+
+/*
+ * cam_req_mgr_create_session() - allocate a session, create its handle and
+ * add it to the global session list. On success the new handle is written
+ * back into @ses_info->session_hdl. Returns 0 or a negative errno.
+ */
+int cam_req_mgr_create_session(
+	struct cam_req_mgr_session_info *ses_info)
+{
+	int rc = 0;
+	int32_t session_hdl;
+	struct cam_req_mgr_core_session *cam_session = NULL;
+
+	if (!ses_info) {
+		CAM_DBG(CAM_CRM, "NULL session info pointer");
+		return -EINVAL;
+	}
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	/* No cast needed on kzalloc(); it returns void * */
+	cam_session = kzalloc(sizeof(*cam_session), GFP_KERNEL);
+	if (!cam_session) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	session_hdl = cam_create_session_hdl((void *)cam_session);
+	if (session_hdl < 0) {
+		CAM_ERR(CAM_CRM, "unable to create session_hdl = %x",
+			session_hdl);
+		rc = session_hdl;
+		kfree(cam_session);
+		goto end;
+	}
+	ses_info->session_hdl = session_hdl;
+
+	mutex_init(&cam_session->lock);
+	CAM_DBG(CAM_CRM, "LOCK_DBG session lock %pK", &cam_session->lock);
+
+	mutex_lock(&cam_session->lock);
+	cam_session->session_hdl = session_hdl;
+	cam_session->num_links = 0;
+	cam_session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+	list_add(&cam_session->entry, &g_crm_core_dev->session_head);
+	mutex_unlock(&cam_session->lock);
+end:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_unlink()
+ *
+ * @brief : Unlink devices on a link structure from the session
+ * @link  : Pointer to the link structure
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_unlink(struct cam_req_mgr_core_link *link)
+{
+	int rc;
+
+	/* Move link to IDLE first so no new triggers are serviced */
+	spin_lock_bh(&link->link_state_spin_lock);
+	link->state = CAM_CRM_LINK_STATE_IDLE;
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	rc = __cam_req_mgr_disconnect_link(link);
+	if (rc)
+		/*
+		 * NOTE(review): logs under CAM_CORE while the rest of this
+		 * file uses CAM_CRM -- confirm intentional.
+		 */
+		CAM_ERR(CAM_CORE,
+			"Unlink for all devices was not successful");
+
+	mutex_lock(&link->lock);
+	/* Destroy timer of link */
+	crm_timer_exit(&link->watchdog);
+
+	/* Destroy workq of link */
+	cam_req_mgr_workq_destroy(&link->workq);
+
+	/* Cleanup request tables and unlink devices */
+	__cam_req_mgr_destroy_link_info(link);
+
+	/* Free memory holding data of linked devs */
+	__cam_req_mgr_destroy_subdev(link->l_dev);
+
+	/* Destroy the link handle; this rc overwrites the disconnect rc */
+	rc = cam_destroy_device_hdl(link->link_hdl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRM, "error destroying link hdl %x rc %d",
+			link->link_hdl, rc);
+	}
+
+	mutex_unlock(&link->lock);
+	return rc;
+}
+
+/*
+ * cam_req_mgr_destroy_session() - tear down a session: force-unlink any
+ * remaining links, remove the session from the global list and destroy
+ * its handle. Returns 0 or a negative errno.
+ */
+int cam_req_mgr_destroy_session(
+	struct cam_req_mgr_session_info *ses_info)
+{
+	int rc;
+	int i;
+	struct cam_req_mgr_core_session *cam_session = NULL;
+	struct cam_req_mgr_core_link *link;
+
+	if (!ses_info) {
+		CAM_DBG(CAM_CRM, "NULL session info pointer");
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	cam_session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(ses_info->session_hdl);
+	if (!cam_session) {
+		CAM_ERR(CAM_CRM, "failed to get session priv");
+		rc = -ENOENT;
+		goto end;
+
+	}
+	if (cam_session->num_links) {
+		CAM_DBG(CAM_CRM, "destroy session %x num_active_links %d",
+			ses_info->session_hdl,
+			cam_session->num_links);
+
+		/* Walk every slot; active links may be sparse in the array */
+		for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
+			link = cam_session->links[i];
+
+			if (!link)
+				continue;
+
+			/* Ignore return value since session is going away */
+			__cam_req_mgr_unlink(link);
+			__cam_req_mgr_free_link(link);
+		}
+	}
+	list_del(&cam_session->entry);
+	mutex_destroy(&cam_session->lock);
+	kfree(cam_session);
+
+	rc = cam_destroy_session_hdl(ses_info->session_hdl);
+	if (rc < 0)
+		CAM_ERR(CAM_CRM, "unable to destroy session_hdl = %x rc %d",
+			ses_info->session_hdl, rc);
+
+end:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+/*
+ * cam_req_mgr_link() - create a link inside a session.
+ *
+ * Reserves a link slot in the session, creates a device handle for it,
+ * allocates the connected-device array, queries/links each device, moves
+ * the link to READY, then creates its serial high-priority workqueue and
+ * task payloads. Error paths unwind in strict reverse order via the
+ * labels at the bottom. Returns 0 or a negative errno; on success
+ * link_info->link_hdl holds the new handle.
+ */
+int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
+{
+	int rc = 0;
+	int wq_flag = 0;
+	char buf[128];
+	struct cam_create_dev_hdl root_dev;
+	struct cam_req_mgr_core_session *cam_session;
+	struct cam_req_mgr_core_link *link;
+
+	if (!link_info) {
+		CAM_DBG(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
+		CAM_ERR(CAM_CRM, "Invalid num devices %d",
+			link_info->num_devices);
+		return -EINVAL;
+	}
+
+	/* session hdl's priv data is cam session struct */
+	cam_session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(link_info->session_hdl);
+	if (!cam_session) {
+		CAM_DBG(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+
+	/* Allocate link struct and map it with session's request queue */
+	link = __cam_req_mgr_reserve_link(cam_session);
+	if (!link) {
+		CAM_ERR(CAM_CRM, "failed to reserve new link");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
+		return -EINVAL;
+	}
+	CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl);
+
+	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
+	root_dev.session_hdl = link_info->session_hdl;
+	root_dev.priv = (void *)link;
+
+	mutex_lock(&link->lock);
+	/* Create unique dev handle for link */
+	link->link_hdl = cam_create_device_hdl(&root_dev);
+	if (link->link_hdl < 0) {
+		CAM_ERR(CAM_CRM,
+			"Insufficient memory to create new device handle");
+		rc = link->link_hdl;
+		goto link_hdl_fail;
+	}
+	link_info->link_hdl = link->link_hdl;
+
+	/* Allocate memory to hold data of all linked devs */
+	rc = __cam_req_mgr_create_subdevs(&link->l_dev,
+		link_info->num_devices);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRM,
+			"Insufficient memory to create new crm subdevs");
+		goto create_subdev_failed;
+	}
+
+	/* Using device ops query connected devs, prepare request tables */
+	rc = __cam_req_mgr_setup_link_info(link, link_info);
+	if (rc < 0)
+		goto setup_failed;
+
+	spin_lock_bh(&link->link_state_spin_lock);
+	link->state = CAM_CRM_LINK_STATE_READY;
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	/* Create worker for current link; name is "<session>-<link>" */
+	snprintf(buf, sizeof(buf), "%x-%x",
+		link_info->session_hdl, link->link_hdl);
+	wq_flag = CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL;
+	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
+		&link->workq, CRM_WORKQ_USAGE_NON_IRQ, wq_flag);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRM, "FATAL: unable to create worker");
+		/* Link info must be torn down here before the shared unwind */
+		__cam_req_mgr_destroy_link_info(link);
+		goto setup_failed;
+	}
+
+	/* Assign payload to workqueue tasks */
+	rc = __cam_req_mgr_setup_payload(link->workq);
+	if (rc < 0) {
+		__cam_req_mgr_destroy_link_info(link);
+		cam_req_mgr_workq_destroy(&link->workq);
+		goto setup_failed;
+	}
+
+	mutex_unlock(&link->lock);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+/* Unwind labels: each falls through to undo the steps before it */
+setup_failed:
+	__cam_req_mgr_destroy_subdev(link->l_dev);
+create_subdev_failed:
+	cam_destroy_device_hdl(link->link_hdl);
+	link_info->link_hdl = 0;
+link_hdl_fail:
+	mutex_unlock(&link->lock);
+	__cam_req_mgr_unreserve_link(cam_session, link);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+/*
+ * cam_req_mgr_unlink() - destroy one link of a session.
+ * Resolves the session and link from their handles, tears the link down
+ * and returns its slot to the session pool. Returns 0 or a negative errno.
+ */
+int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
+{
+	int rc = 0;
+	struct cam_req_mgr_core_session *cam_session;
+	struct cam_req_mgr_core_link *link;
+
+	if (!unlink_info) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	CAM_DBG(CAM_CRM, "link_hdl %x", unlink_info->link_hdl);
+
+	/* session hdl's priv data is cam session struct */
+	cam_session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(unlink_info->session_hdl);
+	if (!cam_session) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
+		return -EINVAL;
+	}
+
+	/* link hdl's priv data is core_link struct */
+	link = cam_get_device_priv(unlink_info->link_hdl);
+	if (!link) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	rc = __cam_req_mgr_unlink(link);
+
+	/* Free curent link and put back into session's free pool of links */
+	__cam_req_mgr_unreserve_link(cam_session, link);
+
+done:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+/*
+ * cam_req_mgr_schedule_request() - open a new request on a link.
+ * Builds a CRM_WORKQ_TASK_SCHED_REQ payload on the stack and processes it
+ * synchronously via cam_req_mgr_process_sched_req(). Bubble recovery is
+ * taken from the request unless debugfs has forced it on/off for the
+ * session. Returns 0 or a negative errno.
+ */
+int cam_req_mgr_schedule_request(
+	struct cam_req_mgr_sched_request *sched_req)
+{
+	int rc = 0;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_core_session *session = NULL;
+	struct cam_req_mgr_sched_request *sched;
+	struct crm_task_payload task_data;
+
+	if (!sched_req) {
+		CAM_ERR(CAM_CRM, "csl_req is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(sched_req->link_hdl);
+	if (!link) {
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", sched_req->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	session = (struct cam_req_mgr_core_session *)link->parent;
+	if (!session) {
+		CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CAM_DBG(CAM_CRM, "link 0x%x req %lld, sync_mode %d",
+		sched_req->link_hdl, sched_req->req_id, sched_req->sync_mode);
+
+	/* task_data lives on the stack; processed synchronously below */
+	task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
+	sched = (struct cam_req_mgr_sched_request *)&task_data.u;
+	sched->req_id = sched_req->req_id;
+	sched->sync_mode = sched_req->sync_mode;
+	sched->link_hdl = sched_req->link_hdl;
+	if (session->force_err_recovery == AUTO_RECOVERY) {
+		sched->bubble_enable = sched_req->bubble_enable;
+	} else {
+		/* Debugfs override: force recovery on or off */
+		sched->bubble_enable =
+		(session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0;
+	}
+
+	rc = cam_req_mgr_process_sched_req(link, &task_data);
+
+	CAM_DBG(CAM_REQ, "Open req %lld on link 0x%x with sync_mode %d",
+		sched_req->req_id, sched_req->link_hdl, sched_req->sync_mode);
+end:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+/*
+ * cam_req_mgr_sync_config() - configure sync mode between two links of a
+ * session (dual-camera use case). Resets the per-link sync bookkeeping on
+ * both links and cross-wires them via sync_link. Returns 0 or a negative
+ * errno.
+ */
+int cam_req_mgr_sync_config(
+	struct cam_req_mgr_sync_mode *sync_info)
+{
+	int rc = 0;
+	struct cam_req_mgr_core_session *cam_session;
+	struct cam_req_mgr_core_link *link1 = NULL;
+	struct cam_req_mgr_core_link *link2 = NULL;
+
+	if (!sync_info) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+
+	/*
+	 * NOTE(review): bound uses MAX_LINKS_PER_SESSION while this file's
+	 * header defines MAXIMUM_LINKS_PER_SESSION -- confirm both resolve
+	 * to the same limit.
+	 */
+	if ((sync_info->num_links < 0) ||
+		(sync_info->num_links >
+		MAX_LINKS_PER_SESSION)) {
+		CAM_ERR(CAM_CRM, "Invalid num links %d", sync_info->num_links);
+		return -EINVAL;
+	}
+
+	/* Exactly two handles are expected for the dual-cam sync case */
+	if ((!sync_info->link_hdls[0]) || (!sync_info->link_hdls[1])) {
+		CAM_WARN(CAM_CRM, "Invalid link handles 0x%x 0x%x",
+			sync_info->link_hdls[0], sync_info->link_hdls[1]);
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	/* session hdl's priv data is cam session struct */
+	cam_session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(sync_info->session_hdl);
+	if (!cam_session) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
+		return -EINVAL;
+	}
+
+	mutex_lock(&cam_session->lock);
+
+	CAM_DBG(CAM_CRM, "link handles %x %x",
+		sync_info->link_hdls[0], sync_info->link_hdls[1]);
+
+	/* only two links existing per session in dual cam use case*/
+	link1 = cam_get_device_priv(sync_info->link_hdls[0]);
+	if (!link1) {
+		CAM_ERR(CAM_CRM, "link1 NULL pointer");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	link2 = cam_get_device_priv(sync_info->link_hdls[1]);
+	if (!link2) {
+		CAM_ERR(CAM_CRM, "link2 NULL pointer");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	/* -1 marks sof_counter/sync_self_ref as not yet initialized */
+	link1->sof_counter = -1;
+	link1->sync_self_ref = -1;
+	link1->frame_skip_flag = false;
+	link1->sync_link_sof_skip = false;
+	link1->sync_link = link2;
+
+	link2->sof_counter = -1;
+	link2->sync_self_ref = -1;
+	link2->frame_skip_flag = false;
+	link2->sync_link_sof_skip = false;
+	link2->sync_link = link1;
+
+	cam_session->sync_mode = sync_info->sync_mode;
+	CAM_DBG(CAM_REQ,
+		"Sync config on link1 0x%x & link2 0x%x with sync_mode %d",
+		link1->link_hdl, link2->link_hdl, cam_session->sync_mode);
+
+done:
+	mutex_unlock(&cam_session->lock);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+/*
+ * cam_req_mgr_flush_requests() - flush requests on a link.
+ * Enqueues a CRM_WORKQ_TASK_FLUSH_REQ task and blocks until the worker
+ * signals completion or CAM_REQ_MGR_SCHED_REQ_TIMEOUT ms elapse.
+ * Returns 0 on success, -ETIMEDOUT on timeout, other negative errno on
+ * validation/enqueue failure.
+ */
+int cam_req_mgr_flush_requests(
+	struct cam_req_mgr_flush_info *flush_info)
+{
+	int rc = 0;
+	struct crm_workq_task *task = NULL;
+	struct cam_req_mgr_core_link *link = NULL;
+	struct cam_req_mgr_flush_info *flush;
+	struct crm_task_payload *task_data;
+	struct cam_req_mgr_core_session *session = NULL;
+
+	if (!flush_info) {
+		CAM_ERR(CAM_CRM, "flush req is NULL");
+		rc = -EFAULT;
+		goto end;
+	}
+	if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
+		CAM_ERR(CAM_CRM, "incorrect flush type %x",
+			flush_info->flush_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	/* session hdl's priv data is cam session struct */
+	session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(flush_info->session_hdl);
+	if (!session) {
+		CAM_ERR(CAM_CRM, "Invalid session %x", flush_info->session_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+	if (session->num_links <= 0) {
+		CAM_WARN(CAM_CRM, "No active links in session %x",
+			flush_info->session_hdl);
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(flush_info->link_hdl);
+	if (!link) {
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", flush_info->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_FLUSH_REQ;
+	flush = (struct cam_req_mgr_flush_info *)&task_data->u;
+	flush->req_id = flush_info->req_id;
+	flush->link_hdl = flush_info->link_hdl;
+	flush->flush_type = flush_info->flush_type;
+	task->process_cb = &cam_req_mgr_process_flush_req;
+	init_completion(&link->workq_comp);
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+	/* Do not block on a completion that will never be signalled */
+	if (rc)
+		goto end;
+
+	/*
+	 * Blocking call. wait_for_completion_timeout() returns the
+	 * remaining jiffies (> 0) on success and 0 on timeout, so map it
+	 * to a proper errno instead of returning raw jiffies to the
+	 * caller.
+	 */
+	if (!wait_for_completion_timeout(
+		&link->workq_comp,
+		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT)))
+		rc = -ETIMEDOUT;
+	else
+		rc = 0;
+end:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+/*
+ * cam_req_mgr_link_control() - activate or deactivate a set of links.
+ * ACTIVATE starts the SOF watchdog and sends RESUME to every device on
+ * the link; DEACTIVATE stops the watchdog and sends PAUSE. Returns 0 or
+ * a negative errno (the last failure wins if several links fail).
+ */
+int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
+{
+	int rc = 0;
+	int i, j;
+	struct cam_req_mgr_core_link *link = NULL;
+
+	struct cam_req_mgr_connected_device *dev = NULL;
+	struct cam_req_mgr_link_evt_data evt_data;
+
+	/* crm_lock not yet taken; these gotos skip straight to return */
+	if (!control) {
+		CAM_ERR(CAM_CRM, "Control command is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (control->num_links > MAX_LINKS_PER_SESSION) {
+		CAM_ERR(CAM_CRM, "Invalid number of links %d",
+			control->num_links);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	for (i = 0; i < control->num_links; i++) {
+		link = (struct cam_req_mgr_core_link *)
+			cam_get_device_priv(control->link_hdls[i]);
+		if (!link) {
+			CAM_ERR(CAM_CRM, "Link(%d) is NULL on session 0x%x",
+				i, control->session_hdl);
+			rc = -EINVAL;
+			break;
+		}
+
+		mutex_lock(&link->lock);
+		if (control->ops == CAM_REQ_MGR_LINK_ACTIVATE) {
+			/* Start SOF watchdog timer */
+			rc = crm_timer_init(&link->watchdog,
+				CAM_REQ_MGR_WATCHDOG_TIMEOUT, link,
+				&__cam_req_mgr_sof_freeze);
+			if (rc < 0) {
+				CAM_ERR(CAM_CRM,
+					"SOF timer start fails: link=0x%x",
+					link->link_hdl);
+				rc = -EFAULT;
+			}
+			/*
+			 * NOTE(review): devices are still resumed even if
+			 * the watchdog failed to start -- confirm this
+			 * best-effort behavior is intentional.
+			 */
+			/* notify nodes */
+			for (j = 0; j < link->num_devs; j++) {
+				dev = &link->l_dev[j];
+				evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_RESUME;
+				evt_data.link_hdl =  link->link_hdl;
+				evt_data.dev_hdl = dev->dev_hdl;
+				evt_data.req_id = 0;
+				if (dev->ops && dev->ops->process_evt)
+					dev->ops->process_evt(&evt_data);
+			}
+		} else if (control->ops ==  CAM_REQ_MGR_LINK_DEACTIVATE) {
+			/* Destroy SOF watchdog timer */
+			spin_lock_bh(&link->link_state_spin_lock);
+			crm_timer_exit(&link->watchdog);
+			spin_unlock_bh(&link->link_state_spin_lock);
+			/* notify nodes */
+			for (j = 0; j < link->num_devs; j++) {
+				dev = &link->l_dev[j];
+				evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_PAUSE;
+				evt_data.link_hdl =  link->link_hdl;
+				evt_data.dev_hdl = dev->dev_hdl;
+				evt_data.req_id = 0;
+				if (dev->ops && dev->ops->process_evt)
+					dev->ops->process_evt(&evt_data);
+			}
+		} else {
+			CAM_ERR(CAM_CRM, "Invalid link control command");
+			rc = -EINVAL;
+		}
+		mutex_unlock(&link->lock);
+	}
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+end:
+	return rc;
+}
+
+
+/*
+ * cam_req_mgr_core_device_init() - allocate and initialize the global CRM
+ * core device (session list, lock, debugfs). Idempotent: returns 0 if
+ * already initialized. Returns -ENOMEM on allocation failure.
+ */
+int cam_req_mgr_core_device_init(void)
+{
+	CAM_DBG(CAM_CRM, "Enter g_crm_core_dev %pK", g_crm_core_dev);
+
+	if (g_crm_core_dev) {
+		CAM_WARN(CAM_CRM, "core device is already initialized");
+		return 0;
+	}
+	/* No cast needed on kzalloc(); it returns void * */
+	g_crm_core_dev = kzalloc(sizeof(*g_crm_core_dev), GFP_KERNEL);
+	if (!g_crm_core_dev)
+		return -ENOMEM;
+
+	CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
+	INIT_LIST_HEAD(&g_crm_core_dev->session_head);
+	mutex_init(&g_crm_core_dev->crm_lock);
+	cam_req_mgr_debug_register(g_crm_core_dev);
+
+	return 0;
+}
+
+/*
+ * cam_req_mgr_core_device_deinit() - destroy the global CRM core device.
+ * Returns -EINVAL if it was never initialized, else 0.
+ */
+int cam_req_mgr_core_device_deinit(void)
+{
+	if (!g_crm_core_dev) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
+	mutex_destroy(&g_crm_core_dev->crm_lock);
+	kfree(g_crm_core_dev);
+	/* Clear the pointer so a later init can re-create the device */
+	g_crm_core_dev = NULL;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core.h
new file mode 100644
index 000000000000..b7222237473b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core.h
@@ -0,0 +1,470 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_REQ_MGR_CORE_H_
+#define _CAM_REQ_MGR_CORE_H_
+
+#include <linux/spinlock.h>
+#include "cam_req_mgr_interface.h"
+#include "cam_req_mgr_core_defs.h"
+#include "cam_req_mgr_timer.h"
+
+/* Max devices that may be connected on one link / req slots per queue */
+#define CAM_REQ_MGR_MAX_LINKED_DEV 16
+#define MAX_REQ_SLOTS 48
+
+/* Timeouts in milliseconds */
+#define CAM_REQ_MGR_WATCHDOG_TIMEOUT 5000
+#define CAM_REQ_MGR_SCHED_REQ_TIMEOUT 1000
+#define CAM_REQ_MGR_SIMULATE_SCHED_REQ 30
+
+/* Debugfs-selectable bubble recovery policy values */
+#define FORCE_DISABLE_RECOVERY 2
+#define FORCE_ENABLE_RECOVERY 1
+#define AUTO_RECOVERY 0
+
+#define CRM_WORKQ_NUM_TASKS 60
+
+#define MAX_SYNC_COUNT 65535
+
+#define SYNC_LINK_SOF_CNT_MAX_LMT 1
+
+#define MAXIMUM_LINKS_PER_SESSION 4
+
+/**
+ * enum crm_workq_task_type
+ * @codes: to identify which type of task is present in a workq payload
+ */
+enum crm_workq_task_type {
+	CRM_WORKQ_TASK_GET_DEV_INFO,
+	CRM_WORKQ_TASK_SETUP_LINK,
+	CRM_WORKQ_TASK_DEV_ADD_REQ,
+	CRM_WORKQ_TASK_APPLY_REQ,
+	CRM_WORKQ_TASK_NOTIFY_SOF,
+	CRM_WORKQ_TASK_NOTIFY_ERR,
+	CRM_WORKQ_TASK_NOTIFY_FREEZE,
+	CRM_WORKQ_TASK_SCHED_REQ,
+	CRM_WORKQ_TASK_FLUSH_REQ,
+	CRM_WORKQ_TASK_INVALID,
+};
+
+/**
+ * struct crm_task_payload
+ * @type           : to identify which type of task is present
+ * @u              : union of payload of all types of tasks supported
+ * @sched_req      : contains info of incoming request from CSL to CRM
+ * @flush_info     : contains info of cancelled request
+ * @dev_req        : contains tracking info of available req id at device
+ * @send_req       : contains info of apply settings to be sent to devs in link
+ * @apply_req      : contains info of which request is applied at device
+ * @notify_trigger : contains notification from IFE to CRM about trigger
+ * @notify_err     : contains error info happened while processing request
+ * -
+ */
+struct crm_task_payload {
+	enum crm_workq_task_type type;
+	union {
+		struct cam_req_mgr_sched_request sched_req;
+		struct cam_req_mgr_flush_info flush_info;
+		struct cam_req_mgr_add_request dev_req;
+		struct cam_req_mgr_send_request send_req;
+		struct cam_req_mgr_trigger_notify notify_trigger;
+		struct cam_req_mgr_error_notify notify_err;
+	} u;
+};
+
+/**
+ * enum crm_req_state
+ * State machine for life cycle of a request in a pd table
+ * EMPTY   : indicates req slot is empty
+ * PENDING : indicates req slot is waiting for reqs from all devs
+ * READY   : indicates req slot is ready to be sent to devs
+ * INVALID : indicates req slot is not in valid state
+ */
+enum crm_req_state {
+	CRM_REQ_STATE_EMPTY,
+	CRM_REQ_STATE_PENDING,
+	CRM_REQ_STATE_READY,
+	CRM_REQ_STATE_INVALID,
+};
+
+/**
+ * enum crm_slot_status
+ * State machine for life cycle of a request in the input queue
+ * NO_REQ    : empty slot
+ * REQ_ADDED : new entry in slot
+ * PENDING   : waiting for next trigger to apply
+ * APPLIED   : req is sent to all devices
+ * INVALID   : invalid state
+ */
+enum crm_slot_status {
+	CRM_SLOT_STATUS_NO_REQ,
+	CRM_SLOT_STATUS_REQ_ADDED,
+	CRM_SLOT_STATUS_REQ_PENDING,
+	CRM_SLOT_STATUS_REQ_APPLIED,
+	CRM_SLOT_STATUS_INVALID,
+};
+
+/**
+ * enum cam_req_mgr_link_state
+ * State machine for life cycle of a link in CRM
+ * AVAILABLE : link slot is free for reservation
+ * IDLE      : link initialized but not ready yet
+ * READY     : link is ready for use
+ * ERR       : link has encountered error
+ * MAX       : invalid state
+ */
+enum cam_req_mgr_link_state {
+	CAM_CRM_LINK_STATE_AVAILABLE,
+	CAM_CRM_LINK_STATE_IDLE,
+	CAM_CRM_LINK_STATE_READY,
+	CAM_CRM_LINK_STATE_ERR,
+	CAM_CRM_LINK_STATE_MAX,
+};
+
+/**
+ * struct cam_req_mgr_traverse
+ * @idx              : slot index
+ * @result           : bitmask of pd tables that applied successfully
+ *                     (see SET_SUCCESS_BIT/SET_FAILURE_BIT)
+ * @tbl              : pointer of pipeline delay based request table
+ * @apply_data       : pointer which various tables will update during traverse
+ * @in_q             : input request queue pointer
+ * @validate_only    : Whether to validate only and/or update settings
+ * @self_link        : To indicate whether the check is for the given link or
+ *                     the other sync link
+ * @inject_delay_chk : if inject delay has been validated for all pd devices
+ * @open_req_cnt     : Count of open requests yet to be serviced in the kernel.
+ */
+struct cam_req_mgr_traverse {
+	int32_t idx;
+	uint32_t result;
+	struct cam_req_mgr_req_tbl *tbl;
+	struct cam_req_mgr_apply *apply_data;
+	struct cam_req_mgr_req_queue *in_q;
+	bool validate_only;
+	bool self_link;
+	bool inject_delay_chk;
+	int32_t open_req_cnt;
+};
+
+/**
+ * struct cam_req_mgr_apply
+ * @idx     : corresponding input queue slot index
+ * @pd      : pipeline delay of device
+ * @req_id  : req id for dev with above pd to process
+ * @skip_idx: skip applying settings when this is set.
+ */
+struct cam_req_mgr_apply {
+	int32_t idx;
+	int32_t pd;
+	int64_t req_id;
+	int32_t skip_idx;
+};
+
+/**
+ * struct cam_req_mgr_tbl_slot
+ * @idx             : slot index
+ * @req_ready_map   : mask tracking which all devices have request ready
+ * @state           : state machine for life cycle of a slot
+ * @inject_delay    : insert extra bubbling for flash type of use cases
+ * @dev_hdl         : stores the dev_hdl, who is having higher inject delay
+ * @skip_next_frame : flag to drop the frame after skip_before_apply frame
+ * @is_applied      : flag to identify if request is already applied to
+ *                    device.
+ */
+struct cam_req_mgr_tbl_slot {
+	int32_t idx;
+	uint32_t req_ready_map;
+	enum crm_req_state state;
+	uint32_t inject_delay;
+	int32_t dev_hdl;
+	bool skip_next_frame;
+	bool is_applied;
+};
+
+/**
+ * struct cam_req_mgr_req_tbl
+ * @id            : table identifier
+ * @pd            : pipeline delay of table
+ * @dev_count     : num of devices having same pipeline delay
+ * @dev_mask      : mask to track which devices are linked
+ * @skip_traverse : to indicate how many traverses need to be dropped
+ *                  by this table especially in the beginning or bubble recovery
+ * @next          : pointer to next pipeline delay request table
+ * @pd_delta      : difference between this table's pipeline delay and next
+ * @num_slots     : number of request slots present in the table
+ * @slot          : array of slots tracking requests availability at devices
+ */
+struct cam_req_mgr_req_tbl {
+	int32_t id;
+	int32_t pd;
+	int32_t dev_count;
+	int32_t dev_mask;
+	int32_t skip_traverse;
+	struct cam_req_mgr_req_tbl *next;
+	int32_t pd_delta;
+	int32_t num_slots;
+	struct cam_req_mgr_tbl_slot slot[MAX_REQ_SLOTS];
+};
+
+/**
+ * struct cam_req_mgr_slot
+ * - Internal Book keeping
+ * @idx       : slot index
+ * @skip_idx  : if req id in this slot needs to be skipped/not applied
+ * @status    : state machine for life cycle of a slot
+ * - members updated due to external events
+ * @recover   : if user enabled recovery for this request.
+ * @req_id    : request id held in this slot
+ * @sync_mode : Sync mode in which req id in this slot has to applied
+ */
+struct cam_req_mgr_slot {
+	int32_t idx;
+	int32_t skip_idx;
+	enum crm_slot_status status;
+	int32_t recover;
+	int64_t req_id;
+	int32_t sync_mode;
+};
+
+/**
+ * struct cam_req_mgr_req_queue
+ * @num_slots : max num of input queue slots
+ * @slot      : request slot holding incoming request id and bubble info.
+ * @rd_idx    : indicates slot index currently in process.
+ * @wr_idx    : indicates slot index to hold new upcoming req.
+ */
+struct cam_req_mgr_req_queue {
+	int32_t num_slots;
+	struct cam_req_mgr_slot slot[MAX_REQ_SLOTS];
+	int32_t rd_idx;
+	int32_t wr_idx;
+};
+
+/**
+ * struct cam_req_mgr_req_data
+ * @in_q       : Pointer to Input request queue
+ * @l_tbl      : unique pd request tables.
+ * @num_tbl    : how many unique pd value devices are present
+ * @apply_data : Holds information about request id for a request
+ * @lock       : mutex lock protecting request data ops.
+ */
+struct cam_req_mgr_req_data {
+	struct cam_req_mgr_req_queue *in_q;
+	struct cam_req_mgr_req_tbl *l_tbl;
+	int32_t num_tbl;
+	struct cam_req_mgr_apply apply_data[CAM_PIPELINE_DELAY_MAX];
+	struct mutex lock;
+};
+
+/**
+ * struct cam_req_mgr_connected_device
+ * - Device Properties
+ * @dev_hdl  : device handle
+ * @dev_bit  : unique bit assigned to device in link
+ * - Device characteristics
+ * @pd_tbl   : tracks latest available req id at this device
+ * @dev_info : holds dev characteristics such as pipeline delay, dev name
+ * @ops      : holds func pointer to call methods on this device
+ * @parent   : pvt data - like link which this dev hdl belongs to
+ */
+struct cam_req_mgr_connected_device {
+	int32_t dev_hdl;
+	int64_t dev_bit;
+	struct cam_req_mgr_req_tbl *pd_tbl;
+	struct cam_req_mgr_device_info dev_info;
+	struct cam_req_mgr_kmd_ops *ops;
+	void *parent;
+};
+
+/**
+ * struct cam_req_mgr_core_link
+ * - Link Properties
+ * @link_hdl             : Link identifier
+ * @num_devs             : num of connected devices to this link
+ * @max_delay            : Max of pipeline delay of all connected devs
+ * @workq                : Pointer to handle workq related jobs
+ * @pd_mask              : each set bit indicates the device with pd equal to
+ *                         bit position is available.
+ * - List of connected devices
+ * @l_dev                : List of connected devices to this link
+ * - Request handling data struct
+ * @req                  : req data holder.
+ * - Timer
+ * @watchdog             : watchdog timer to recover from sof freeze
+ * - Link private data
+ * @workq_comp           : condition variable to block user thread for workq
+ *                         to finish schedule request processing
+ * @state                : link state machine
+ * @parent               : pvt data - link's parent is session
+ * @lock                 : mutex lock to guard link data operations
+ * @link_state_spin_lock : spin lock to protect link state variable
+ * @subscribe_event      : irqs that link subscribes, IFE should send
+ *                         notification to CRM at those hw events.
+ * @trigger_mask         : mask on which irq the req is already applied
+ * @sync_link            : pointer to the sync link for synchronization
+ * @sof_counter          : sof counter during sync_mode
+ * @sync_self_ref        : reference sync count against which the difference
+ *                         between sync_counts for a given link is checked
+ * @frame_skip_flag      : flag that determines if a frame needs to be skipped
+ * @sync_link_sof_skip   : flag determines if a pkt is not available for a given
+ *                         frame in a particular link skip corresponding
+ *                         frame in sync link as well.
+ * @open_req_cnt         : Counter to keep track of open requests that are yet
+ *                         to be serviced in the kernel.
+ *
+ */
+struct cam_req_mgr_core_link {
+	int32_t link_hdl;
+	int32_t num_devs;
+	enum cam_pipeline_delay max_delay;
+	struct cam_req_mgr_core_workq *workq;
+	int32_t pd_mask;
+	struct cam_req_mgr_connected_device *l_dev;
+	struct cam_req_mgr_req_data req;
+	struct cam_req_mgr_timer *watchdog;
+	struct completion workq_comp;
+	enum cam_req_mgr_link_state state;
+	void *parent;
+	struct mutex lock;
+	spinlock_t link_state_spin_lock;
+	uint32_t subscribe_event;
+	uint32_t trigger_mask;
+	struct cam_req_mgr_core_link *sync_link;
+	int64_t sof_counter;
+	int64_t sync_self_ref;
+	bool frame_skip_flag;
+	bool sync_link_sof_skip;
+	int32_t open_req_cnt;
+};
+
+/**
+ * struct cam_req_mgr_core_session
+ * - Session Properties
+ * @session_hdl        : session identifier
+ * @num_links          : num of active links for current session
+ * - Links of this session
+ * @links              : pointer to array of links within session
+ * @in_q               : Input request queue one per session
+ * - Session private data
+ * @entry              : pvt data - entry in the list of sessions
+ * @lock               : pvt data - mutex to guard session data
+ * - Debug data
+ * @force_err_recovery : For debugging, we can force bubble recovery
+ *                       to be always ON or always OFF using debugfs.
+ * @sync_mode          : Sync mode for this session links
+ */
+struct cam_req_mgr_core_session {
+	int32_t session_hdl;
+	uint32_t num_links;
+	struct cam_req_mgr_core_link *links[MAXIMUM_LINKS_PER_SESSION];
+	struct list_head entry;
+	struct mutex lock;
+	int32_t force_err_recovery;
+	int32_t sync_mode;
+};
+
+/**
+ * struct cam_req_mgr_core_device
+ * - Core camera request manager data struct (global singleton)
+ * @session_head : list head holding sessions
+ * @crm_lock     : mutex lock to protect session creation & destruction
+ */
+struct cam_req_mgr_core_device {
+	struct list_head session_head;
+	struct mutex crm_lock;
+};
+
+/**
+ * cam_req_mgr_create_session()
+ * @brief : creates session
+ * @ses_info : output param for session handle
+ *
+ * called as part of session creation.
+ */
+int cam_req_mgr_create_session(struct cam_req_mgr_session_info *ses_info);
+
+/**
+ * cam_req_mgr_destroy_session()
+ * @brief : destroy session
+ * @ses_info : session handle info, input param
+ *
+ * Called as part of session destroy
+ * return success/failure
+ */
+int cam_req_mgr_destroy_session(struct cam_req_mgr_session_info *ses_info);
+
+/**
+ * cam_req_mgr_link()
+ * @brief : creates a link for a session
+ * @link_info : handle and session info to create a link
+ *
+ * link is formed in a session for multiple devices. it creates
+ * a unique link handle for the link and is specific to a
+ * session. Returns link handle
+ */
+int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info);
+
+/**
+ * cam_req_mgr_unlink()
+ * @brief : destroy a link in a session
+ * @unlink_info : session and link handle info
+ *
+ * link is destroyed in a session
+ */
+int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info);
+
+/**
+ * cam_req_mgr_schedule_request()
+ * @brief: Request is scheduled
+ * @sched_req: request id, session and link id info, bubble recovery info
+ */
+int cam_req_mgr_schedule_request(
+ struct cam_req_mgr_sched_request *sched_req);
+
+/**
+ * cam_req_mgr_sync_mode_setup()
+ * @brief: sync for links in a session
+ * @sync_info: session, links info and master link info
+ */
+int cam_req_mgr_sync_config(struct cam_req_mgr_sync_mode *sync_info);
+
+/**
+ * cam_req_mgr_flush_requests()
+ * @brief: flush all requests
+ * @flush_info: requests related to link and session
+ */
+int cam_req_mgr_flush_requests(
+ struct cam_req_mgr_flush_info *flush_info);
+
+/**
+ * cam_req_mgr_core_device_init()
+ * @brief: initialize crm core
+ */
+int cam_req_mgr_core_device_init(void);
+
+/**
+ * cam_req_mgr_core_device_deinit()
+ * @brief: cleanup crm core
+ */
+int cam_req_mgr_core_device_deinit(void);
+
+/**
+ * cam_req_mgr_handle_core_shutdown()
+ * @brief: Handles camera close
+ */
+void cam_req_mgr_handle_core_shutdown(void);
+
+/**
+ * cam_req_mgr_link_control()
+ * @brief: Handles link control operations
+ * @control: Link control command
+ */
+int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control);
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core_defs.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core_defs.h
new file mode 100644
index 000000000000..852840188767
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_core_defs.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_REQ_MGR_CORE_DEFS_H_
+#define _CAM_REQ_MGR_CORE_DEFS_H_
+
+#define CRM_TRACE_ENABLE 0
+#define CRM_DEBUG_MUTEX 0
+
+#define SET_SUCCESS_BIT(ret, pd) (ret |= (1 << (pd)))
+
+#define SET_FAILURE_BIT(ret, pd) (ret &= (~(1 << (pd))))
+
+#define CRM_GET_REQ_ID(in_q, idx) in_q->slot[idx].req_id
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_debug.c b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_debug.c
new file mode 100644
index 000000000000..1a95a90ca4ba
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_debug.c
@@ -0,0 +1,139 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_req_mgr_debug.h"
+
+#define MAX_SESS_INFO_LINE_BUFF_LEN 256
+
+static char sess_info_buffer[MAX_SESS_INFO_LINE_BUFF_LEN];
+
+static int cam_req_mgr_debug_set_bubble_recovery(void *data, u64 val)
+{
+ struct cam_req_mgr_core_device *core_dev = data;
+ struct cam_req_mgr_core_session *session;
+ int rc = 0;
+
+ mutex_lock(&core_dev->crm_lock);
+
+ if (!list_empty(&core_dev->session_head)) {
+ list_for_each_entry(session,
+ &core_dev->session_head, entry) {
+ session->force_err_recovery = val;
+ }
+ }
+
+ mutex_unlock(&core_dev->crm_lock);
+
+ return rc;
+}
+
+static int cam_req_mgr_debug_get_bubble_recovery(void *data, u64 *val)
+{
+ struct cam_req_mgr_core_device *core_dev = data;
+ struct cam_req_mgr_core_session *session;
+
+ mutex_lock(&core_dev->crm_lock);
+
+ if (!list_empty(&core_dev->session_head)) {
+ session = list_first_entry(&core_dev->session_head,
+ struct cam_req_mgr_core_session,
+ entry);
+ *val = session->force_err_recovery;
+ }
+ mutex_unlock(&core_dev->crm_lock);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(bubble_recovery, cam_req_mgr_debug_get_bubble_recovery,
+ cam_req_mgr_debug_set_bubble_recovery, "%lld\n");
+
+static int session_info_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t session_info_read(struct file *t_file, char *t_char,
+ size_t t_size_t, loff_t *t_loff_t)
+{
+ int i;
+ char *out_buffer = sess_info_buffer;
+ char line_buffer[MAX_SESS_INFO_LINE_BUFF_LEN] = {0};
+ struct cam_req_mgr_core_device *core_dev =
+ (struct cam_req_mgr_core_device *) t_file->private_data;
+ struct cam_req_mgr_core_session *session;
+
+ memset(out_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);
+
+ mutex_lock(&core_dev->crm_lock);
+
+ if (!list_empty(&core_dev->session_head)) {
+ list_for_each_entry(session,
+ &core_dev->session_head, entry) {
+ snprintf(line_buffer, sizeof(line_buffer),
+ "session_hdl = %x \t"
+ "num_links = %d\n",
+ session->session_hdl, session->num_links);
+ strlcat(out_buffer, line_buffer,
+ sizeof(sess_info_buffer));
+ for (i = 0; i < session->num_links; i++) {
+ snprintf(line_buffer, sizeof(line_buffer),
+ "link_hdl[%d] = 0x%x, num_devs connected = %d\n",
+ i, session->links[i]->link_hdl,
+ session->links[i]->num_devs);
+ strlcat(out_buffer, line_buffer,
+ sizeof(sess_info_buffer));
+ }
+ }
+ }
+
+ mutex_unlock(&core_dev->crm_lock);
+
+ return simple_read_from_buffer(t_char, t_size_t,
+ t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t session_info_write(struct file *t_file,
+ const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ memset(sess_info_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);
+
+ return 0;
+}
+
+static const struct file_operations session_info = {
+ .open = session_info_open,
+ .read = session_info_read,
+ .write = session_info_write,
+};
+
+int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev)
+{
+ struct dentry *debugfs_root;
+ char dirname[32] = {0};
+
+ snprintf(dirname, sizeof(dirname), "cam_req_mgr");
+ debugfs_root = debugfs_create_dir(dirname, NULL);
+ if (!debugfs_root)
+ return -ENOMEM;
+
+ if (!debugfs_create_file("sessions_info", 0644,
+ debugfs_root, core_dev, &session_info))
+ return -ENOMEM;
+
+ if (!debugfs_create_file("bubble_recovery", 0644,
+ debugfs_root, core_dev, &bubble_recovery))
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_debug.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_debug.h
new file mode 100644
index 000000000000..6d3e744c9447
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_debug.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_DEBUG_H_
+#define _CAM_REQ_MGR_DEBUG_H_
+
+#include <linux/debugfs.h>
+#include "cam_req_mgr_core.h"
+
+int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev);
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_dev.c
new file mode 100644
index 000000000000..5cf1d844f5e2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_dev.c
@@ -0,0 +1,759 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_defs.h>
+#include "cam_req_mgr_dev.h"
+#include "cam_req_mgr_util.h"
+#include "cam_req_mgr_core.h"
+#include "cam_subdev.h"
+#include "cam_mem_mgr.h"
+#include "cam_debug_util.h"
+#include "cam_common_util.h"
+#include <linux/slub_def.h>
+
+#define CAM_REQ_MGR_EVENT_MAX 30
+
+static struct cam_req_mgr_device g_dev;
+struct kmem_cache *g_cam_req_mgr_timer_cachep;
+
+static int cam_media_device_setup(struct device *dev)
+{
+ int rc;
+
+ g_dev.v4l2_dev->mdev = kzalloc(sizeof(*g_dev.v4l2_dev->mdev),
+ GFP_KERNEL);
+ if (!g_dev.v4l2_dev->mdev) {
+ rc = -ENOMEM;
+ goto mdev_fail;
+ }
+
+ media_device_init(g_dev.v4l2_dev->mdev);
+ g_dev.v4l2_dev->mdev->dev = dev;
+ strlcpy(g_dev.v4l2_dev->mdev->model, CAM_REQ_MGR_VNODE_NAME,
+ sizeof(g_dev.v4l2_dev->mdev->model));
+
+ rc = media_device_register(g_dev.v4l2_dev->mdev);
+ if (rc)
+ goto media_fail;
+
+ return rc;
+
+media_fail:
+ kfree(g_dev.v4l2_dev->mdev);
+ g_dev.v4l2_dev->mdev = NULL;
+mdev_fail:
+ return rc;
+}
+
+static void cam_media_device_cleanup(void)
+{
+ media_entity_cleanup(&g_dev.video->entity);
+ media_device_unregister(g_dev.v4l2_dev->mdev);
+ kfree(g_dev.v4l2_dev->mdev);
+ g_dev.v4l2_dev->mdev = NULL;
+}
+
+static int cam_v4l2_device_setup(struct device *dev)
+{
+ int rc;
+
+ g_dev.v4l2_dev = kzalloc(sizeof(*g_dev.v4l2_dev),
+ GFP_KERNEL);
+ if (!g_dev.v4l2_dev)
+ return -ENOMEM;
+
+ rc = v4l2_device_register(dev, g_dev.v4l2_dev);
+ if (rc)
+ goto reg_fail;
+
+ return rc;
+
+reg_fail:
+ kfree(g_dev.v4l2_dev);
+ g_dev.v4l2_dev = NULL;
+ return rc;
+}
+
+static void cam_v4l2_device_cleanup(void)
+{
+ v4l2_device_unregister(g_dev.v4l2_dev);
+ kfree(g_dev.v4l2_dev);
+ g_dev.v4l2_dev = NULL;
+}
+
+static int cam_req_mgr_open(struct file *filep)
+{
+ int rc;
+
+ mutex_lock(&g_dev.cam_lock);
+ if (g_dev.open_cnt >= 1) {
+ rc = -EALREADY;
+ goto end;
+ }
+
+ rc = v4l2_fh_open(filep);
+ if (rc) {
+ CAM_ERR(CAM_CRM, "v4l2_fh_open failed: %d", rc);
+ goto end;
+ }
+
+ spin_lock_bh(&g_dev.cam_eventq_lock);
+ g_dev.cam_eventq = filep->private_data;
+ spin_unlock_bh(&g_dev.cam_eventq_lock);
+
+ g_dev.open_cnt++;
+ rc = cam_mem_mgr_init();
+ if (rc) {
+ g_dev.open_cnt--;
+ CAM_ERR(CAM_CRM, "mem mgr init failed");
+ goto mem_mgr_init_fail;
+ }
+
+ mutex_unlock(&g_dev.cam_lock);
+ return rc;
+
+mem_mgr_init_fail:
+ v4l2_fh_release(filep);
+end:
+ mutex_unlock(&g_dev.cam_lock);
+ return rc;
+}
+
+static unsigned int cam_req_mgr_poll(struct file *f,
+ struct poll_table_struct *pll_table)
+{
+ int rc = 0;
+ struct v4l2_fh *eventq = f->private_data;
+
+ if (!eventq)
+ return -EINVAL;
+
+ poll_wait(f, &eventq->wait, pll_table);
+ if (v4l2_event_pending(eventq))
+ rc = POLLPRI;
+
+ return rc;
+}
+
+static int cam_req_mgr_close(struct file *filep)
+{
+ struct v4l2_subdev *sd;
+ struct v4l2_fh *vfh = filep->private_data;
+ struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
+
+ mutex_lock(&g_dev.cam_lock);
+
+ if (g_dev.open_cnt <= 0) {
+ mutex_unlock(&g_dev.cam_lock);
+ return -EINVAL;
+ }
+
+ cam_req_mgr_handle_core_shutdown();
+
+ list_for_each_entry(sd, &g_dev.v4l2_dev->subdevs, list) {
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+ continue;
+ if (sd->internal_ops && sd->internal_ops->close) {
+ CAM_DBG(CAM_CRM, "Invoke subdev close for device %s",
+ sd->name);
+ sd->internal_ops->close(sd, subdev_fh);
+ }
+ }
+
+ g_dev.open_cnt--;
+ v4l2_fh_release(filep);
+
+ spin_lock_bh(&g_dev.cam_eventq_lock);
+ g_dev.cam_eventq = NULL;
+ spin_unlock_bh(&g_dev.cam_eventq_lock);
+
+ cam_req_mgr_util_free_hdls();
+ cam_mem_mgr_deinit();
+ mutex_unlock(&g_dev.cam_lock);
+
+ return 0;
+}
+
+static struct v4l2_file_operations g_cam_fops = {
+ .owner = THIS_MODULE,
+ .open = cam_req_mgr_open,
+ .poll = cam_req_mgr_poll,
+ .release = cam_req_mgr_close,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = video_ioctl2,
+#endif
+};
+
+static int cam_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_subscribe(fh, sub, CAM_REQ_MGR_EVENT_MAX, NULL);
+}
+
+static int cam_unsubscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+static long cam_private_ioctl(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ int rc;
+ struct cam_control *k_ioctl;
+
+ if ((!arg) || (cmd != VIDIOC_CAM_CONTROL))
+ return -EINVAL;
+
+ k_ioctl = (struct cam_control *)arg;
+
+ if (!k_ioctl->handle)
+ return -EINVAL;
+
+ switch (k_ioctl->op_code) {
+ case CAM_REQ_MGR_CREATE_SESSION: {
+ struct cam_req_mgr_session_info ses_info;
+
+ if (k_ioctl->size != sizeof(ses_info))
+ return -EINVAL;
+
+ if (copy_from_user(&ses_info,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_req_mgr_session_info))) {
+ return -EFAULT;
+ }
+
+ rc = cam_req_mgr_create_session(&ses_info);
+ if (!rc)
+ if (copy_to_user(
+ u64_to_user_ptr(k_ioctl->handle),
+ &ses_info,
+ sizeof(struct cam_req_mgr_session_info)))
+ rc = -EFAULT;
+ }
+ break;
+
+ case CAM_REQ_MGR_DESTROY_SESSION: {
+ struct cam_req_mgr_session_info ses_info;
+
+ if (k_ioctl->size != sizeof(ses_info))
+ return -EINVAL;
+
+ if (copy_from_user(&ses_info,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_req_mgr_session_info))) {
+ return -EFAULT;
+ }
+
+ rc = cam_req_mgr_destroy_session(&ses_info);
+ }
+ break;
+
+ case CAM_REQ_MGR_LINK: {
+ struct cam_req_mgr_link_info link_info;
+
+ if (k_ioctl->size != sizeof(link_info))
+ return -EINVAL;
+
+ if (copy_from_user(&link_info,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_req_mgr_link_info))) {
+ return -EFAULT;
+ }
+
+ rc = cam_req_mgr_link(&link_info);
+ if (!rc)
+ if (copy_to_user(
+ u64_to_user_ptr(k_ioctl->handle),
+ &link_info,
+ sizeof(struct cam_req_mgr_link_info)))
+ rc = -EFAULT;
+ }
+ break;
+
+ case CAM_REQ_MGR_UNLINK: {
+ struct cam_req_mgr_unlink_info unlink_info;
+
+ if (k_ioctl->size != sizeof(unlink_info))
+ return -EINVAL;
+
+ if (copy_from_user(&unlink_info,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_req_mgr_unlink_info))) {
+ return -EFAULT;
+ }
+
+ rc = cam_req_mgr_unlink(&unlink_info);
+ }
+ break;
+
+ case CAM_REQ_MGR_SCHED_REQ: {
+ struct cam_req_mgr_sched_request sched_req;
+
+ if (k_ioctl->size != sizeof(sched_req))
+ return -EINVAL;
+
+ if (copy_from_user(&sched_req,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_req_mgr_sched_request))) {
+ return -EFAULT;
+ }
+
+ rc = cam_req_mgr_schedule_request(&sched_req);
+ }
+ break;
+
+ case CAM_REQ_MGR_FLUSH_REQ: {
+ struct cam_req_mgr_flush_info flush_info;
+
+ if (k_ioctl->size != sizeof(flush_info))
+ return -EINVAL;
+
+ if (copy_from_user(&flush_info,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_req_mgr_flush_info))) {
+ return -EFAULT;
+ }
+
+ rc = cam_req_mgr_flush_requests(&flush_info);
+ }
+ break;
+
+ case CAM_REQ_MGR_SYNC_MODE: {
+ struct cam_req_mgr_sync_mode sync_info;
+
+ if (k_ioctl->size != sizeof(sync_info))
+ return -EINVAL;
+
+ if (copy_from_user(&sync_info,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_req_mgr_sync_mode))) {
+ return -EFAULT;
+ }
+
+ rc = cam_req_mgr_sync_config(&sync_info);
+ }
+ break;
+ case CAM_REQ_MGR_ALLOC_BUF: {
+ struct cam_mem_mgr_alloc_cmd cmd;
+
+ if (k_ioctl->size != sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_mem_mgr_alloc_cmd))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = cam_mem_mgr_alloc_and_map(&cmd);
+ if (!rc)
+ if (copy_to_user(
+ u64_to_user_ptr(k_ioctl->handle),
+ &cmd, sizeof(struct cam_mem_mgr_alloc_cmd))) {
+ rc = -EFAULT;
+ break;
+ }
+ }
+ break;
+ case CAM_REQ_MGR_MAP_BUF: {
+ struct cam_mem_mgr_map_cmd cmd;
+
+ if (k_ioctl->size != sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_mem_mgr_map_cmd))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = cam_mem_mgr_map(&cmd);
+ if (!rc)
+ if (copy_to_user(
+ u64_to_user_ptr(k_ioctl->handle),
+ &cmd, sizeof(struct cam_mem_mgr_map_cmd))) {
+ rc = -EFAULT;
+ break;
+ }
+ }
+ break;
+ case CAM_REQ_MGR_RELEASE_BUF: {
+ struct cam_mem_mgr_release_cmd cmd;
+
+ if (k_ioctl->size != sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_mem_mgr_release_cmd))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = cam_mem_mgr_release(&cmd);
+ }
+ break;
+ case CAM_REQ_MGR_CACHE_OPS: {
+ struct cam_mem_cache_ops_cmd cmd;
+
+ if (k_ioctl->size != sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_mem_cache_ops_cmd))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = cam_mem_mgr_cache_ops(&cmd);
+ if (rc)
+ rc = -EINVAL;
+ }
+ break;
+ case CAM_REQ_MGR_LINK_CONTROL: {
+ struct cam_req_mgr_link_control cmd;
+
+ if (k_ioctl->size != sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd,
+ u64_to_user_ptr(k_ioctl->handle),
+ sizeof(struct cam_req_mgr_link_control))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = cam_req_mgr_link_control(&cmd);
+ if (rc)
+ rc = -EINVAL;
+ }
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return rc;
+}
+
+static const struct v4l2_ioctl_ops g_cam_ioctl_ops = {
+ .vidioc_subscribe_event = cam_subscribe_event,
+ .vidioc_unsubscribe_event = cam_unsubscribe_event,
+ .vidioc_default = cam_private_ioctl,
+};
+
+static int cam_video_device_setup(void)
+{
+ int rc;
+
+ g_dev.video = video_device_alloc();
+ if (!g_dev.video) {
+ rc = -ENOMEM;
+ goto video_fail;
+ }
+
+ g_dev.video->v4l2_dev = g_dev.v4l2_dev;
+
+ strlcpy(g_dev.video->name, "cam-req-mgr",
+ sizeof(g_dev.video->name));
+ g_dev.video->release = video_device_release;
+ g_dev.video->fops = &g_cam_fops;
+ g_dev.video->ioctl_ops = &g_cam_ioctl_ops;
+ g_dev.video->minor = -1;
+ g_dev.video->vfl_type = VFL_TYPE_GRABBER;
+ rc = video_register_device(g_dev.video, VFL_TYPE_GRABBER, -1);
+ if (rc)
+ goto v4l2_fail;
+
+ rc = media_entity_pads_init(&g_dev.video->entity, 0, NULL);
+ if (rc)
+ goto entity_fail;
+
+ g_dev.video->entity.function = CAM_VNODE_DEVICE_TYPE;
+ g_dev.video->entity.name = video_device_node_name(g_dev.video);
+
+ return rc;
+
+entity_fail:
+ video_unregister_device(g_dev.video);
+v4l2_fail:
+ video_device_release(g_dev.video);
+ g_dev.video = NULL;
+video_fail:
+ return rc;
+}
+
+int cam_req_mgr_notify_message(struct cam_req_mgr_message *msg,
+ uint32_t id,
+ uint32_t type)
+{
+ struct v4l2_event event;
+ struct cam_req_mgr_message *ev_header;
+
+ if (!msg)
+ return -EINVAL;
+
+ event.id = id;
+ event.type = type;
+ ev_header = CAM_REQ_MGR_GET_PAYLOAD_PTR(event,
+ struct cam_req_mgr_message);
+ memcpy(ev_header, msg, sizeof(struct cam_req_mgr_message));
+ v4l2_event_queue(g_dev.video, &event);
+
+ return 0;
+}
+EXPORT_SYMBOL(cam_req_mgr_notify_message);
+
+void cam_video_device_cleanup(void)
+{
+ video_unregister_device(g_dev.video);
+ video_device_release(g_dev.video);
+ g_dev.video = NULL;
+}
+
+void cam_register_subdev_fops(struct v4l2_file_operations *fops)
+{
+ *fops = v4l2_subdev_fops;
+}
+EXPORT_SYMBOL(cam_register_subdev_fops);
+
+int cam_register_subdev(struct cam_subdev *csd)
+{
+ struct v4l2_subdev *sd;
+ int rc;
+
+ if (g_dev.state != true) {
+ CAM_ERR(CAM_CRM, "camera root device not ready yet");
+ return -ENODEV;
+ }
+
+ if (!csd || !csd->name) {
+ CAM_ERR(CAM_CRM, "invalid arguments");
+ return -EINVAL;
+ }
+
+ mutex_lock(&g_dev.dev_lock);
+ if ((g_dev.subdev_nodes_created) &&
+ (csd->sd_flags & V4L2_SUBDEV_FL_HAS_DEVNODE)) {
+ CAM_ERR(CAM_CRM,
+ "dynamic node is not allowed, name: %s, type :%d",
+ csd->name, csd->ent_function);
+ rc = -EINVAL;
+ goto reg_fail;
+ }
+
+ sd = &csd->sd;
+ v4l2_subdev_init(sd, csd->ops);
+ sd->internal_ops = csd->internal_ops;
+ snprintf(sd->name, ARRAY_SIZE(sd->name), csd->name);
+ v4l2_set_subdevdata(sd, csd->token);
+
+ sd->flags = csd->sd_flags;
+ sd->entity.num_pads = 0;
+ sd->entity.pads = NULL;
+ sd->entity.function = csd->ent_function;
+
+ rc = v4l2_device_register_subdev(g_dev.v4l2_dev, sd);
+ if (rc) {
+ CAM_ERR(CAM_CRM, "register subdev failed");
+ goto reg_fail;
+ }
+ g_dev.count++;
+
+reg_fail:
+ mutex_unlock(&g_dev.dev_lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_register_subdev);
+
+int cam_unregister_subdev(struct cam_subdev *csd)
+{
+ if (g_dev.state != true) {
+ CAM_ERR(CAM_CRM, "camera root device not ready yet");
+ return -ENODEV;
+ }
+
+ mutex_lock(&g_dev.dev_lock);
+ v4l2_device_unregister_subdev(&csd->sd);
+ g_dev.count--;
+ mutex_unlock(&g_dev.dev_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cam_unregister_subdev);
+
+static int cam_req_mgr_remove(struct platform_device *pdev)
+{
+ cam_req_mgr_core_device_deinit();
+ cam_req_mgr_util_deinit();
+ cam_media_device_cleanup();
+ cam_video_device_cleanup();
+ cam_v4l2_device_cleanup();
+ mutex_destroy(&g_dev.dev_lock);
+ g_dev.state = false;
+ g_dev.subdev_nodes_created = false;
+
+ return 0;
+}
+
+static int cam_req_mgr_probe(struct platform_device *pdev)
+{
+ int rc;
+
+ rc = cam_v4l2_device_setup(&pdev->dev);
+ if (rc)
+ return rc;
+
+ rc = cam_media_device_setup(&pdev->dev);
+ if (rc)
+ goto media_setup_fail;
+
+ rc = cam_video_device_setup();
+ if (rc)
+ goto video_setup_fail;
+
+ g_dev.open_cnt = 0;
+ mutex_init(&g_dev.cam_lock);
+ spin_lock_init(&g_dev.cam_eventq_lock);
+ g_dev.subdev_nodes_created = false;
+ mutex_init(&g_dev.dev_lock);
+
+ rc = cam_req_mgr_util_init();
+ if (rc) {
+ CAM_ERR(CAM_CRM, "cam req mgr util init is failed");
+ goto req_mgr_util_fail;
+ }
+
+ rc = cam_req_mgr_core_device_init();
+ if (rc) {
+ CAM_ERR(CAM_CRM, "core device setup failed");
+ goto req_mgr_core_fail;
+ }
+
+ g_dev.state = true;
+
+ if (g_cam_req_mgr_timer_cachep == NULL) {
+ g_cam_req_mgr_timer_cachep = kmem_cache_create("crm_timer",
+ sizeof(struct cam_req_mgr_timer), 64,
+ SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE |
+ SLAB_POISON | SLAB_STORE_USER, NULL);
+ if (!g_cam_req_mgr_timer_cachep)
+ CAM_ERR(CAM_CRM,
+ "Failed to create kmem_cache for crm_timer");
+ else
+ CAM_DBG(CAM_CRM, "Name : %s",
+ g_cam_req_mgr_timer_cachep->name);
+ }
+
+ return rc;
+
+req_mgr_core_fail:
+ cam_req_mgr_util_deinit();
+req_mgr_util_fail:
+ mutex_destroy(&g_dev.dev_lock);
+ mutex_destroy(&g_dev.cam_lock);
+ cam_video_device_cleanup();
+video_setup_fail:
+ cam_media_device_cleanup();
+media_setup_fail:
+ cam_v4l2_device_cleanup();
+ return rc;
+}
+
+static const struct of_device_id cam_req_mgr_dt_match[] = {
+ {.compatible = "qcom,cam-req-mgr"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, cam_dt_match);
+
+static struct platform_driver cam_req_mgr_driver = {
+ .probe = cam_req_mgr_probe,
+ .remove = cam_req_mgr_remove,
+ .driver = {
+ .name = "cam_req_mgr",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_req_mgr_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+int cam_dev_mgr_create_subdev_nodes(void)
+{
+ int rc;
+ struct v4l2_subdev *sd;
+
+ if (!g_dev.v4l2_dev)
+ return -EINVAL;
+
+ if (g_dev.state != true) {
+ CAM_ERR(CAM_CRM, "camera root device not ready yet");
+ return -ENODEV;
+ }
+
+ mutex_lock(&g_dev.dev_lock);
+ if (g_dev.subdev_nodes_created) {
+ rc = -EEXIST;
+ goto create_fail;
+ }
+
+ rc = v4l2_device_register_subdev_nodes(g_dev.v4l2_dev);
+ if (rc) {
+ CAM_ERR(CAM_CRM, "failed to register the sub devices");
+ goto create_fail;
+ }
+
+ list_for_each_entry(sd, &g_dev.v4l2_dev->subdevs, list) {
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+ continue;
+ sd->entity.name = video_device_node_name(sd->devnode);
+ CAM_DBG(CAM_CRM, "created node :%s", sd->entity.name);
+ }
+
+ g_dev.subdev_nodes_created = true;
+
+create_fail:
+ mutex_unlock(&g_dev.dev_lock);
+ return rc;
+}
+
+static int __init cam_req_mgr_init(void)
+{
+ return platform_driver_register(&cam_req_mgr_driver);
+}
+
+static int __init cam_req_mgr_late_init(void)
+{
+ return cam_dev_mgr_create_subdev_nodes();
+}
+
+static void __exit cam_req_mgr_exit(void)
+{
+ platform_driver_unregister(&cam_req_mgr_driver);
+}
+
+module_init(cam_req_mgr_init);
+late_initcall(cam_req_mgr_late_init);
+module_exit(cam_req_mgr_exit);
+MODULE_DESCRIPTION("Camera Request Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_dev.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_dev.h
new file mode 100644
index 000000000000..fdb66bc5592a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_dev.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_DEV_H_
+#define _CAM_REQ_MGR_DEV_H_
+
+/**
+ * struct cam_req_mgr_device - a camera request manager device
+ *
+ * @video: pointer to struct video device.
+ * @v4l2_dev: pointer to struct v4l2 device.
+ * @subdev_nodes_created: flag to check the created state.
+ * @count: number of subdevices registered.
+ * @dev_lock: lock for the subdevice count.
+ * @state: state of the root device.
+ * @open_cnt: open count of subdev
+ * @cam_lock: per file handle lock
+ * @cam_eventq: event queue
+ * @cam_eventq_lock: lock for event queue
+ */
+struct cam_req_mgr_device {
+ struct video_device *video;
+ struct v4l2_device *v4l2_dev;
+ bool subdev_nodes_created;
+ int count;
+ struct mutex dev_lock;
+ bool state;
+ int32_t open_cnt;
+ struct mutex cam_lock;
+ struct v4l2_fh *cam_eventq;
+ spinlock_t cam_eventq_lock;
+};
+
+#define CAM_REQ_MGR_GET_PAYLOAD_PTR(ev, type) \
+ (type *)((char *)ev.u.data)
+
+int cam_req_mgr_notify_message(struct cam_req_mgr_message *msg,
+ uint32_t id,
+ uint32_t type);
+
+#endif /* _CAM_REQ_MGR_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_interface.h
new file mode 100644
index 000000000000..1df3122109e6
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_interface.h
@@ -0,0 +1,339 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_INTERFACE_H
+#define _CAM_REQ_MGR_INTERFACE_H
+
+#include <linux/types.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_core_defs.h"
+#include "cam_req_mgr_util.h"
+
+struct cam_req_mgr_trigger_notify;
+struct cam_req_mgr_error_notify;
+struct cam_req_mgr_add_request;
+struct cam_req_mgr_device_info;
+struct cam_req_mgr_core_dev_link_setup;
+struct cam_req_mgr_apply_request;
+struct cam_req_mgr_flush_request;
+struct cam_req_mgr_link_evt_data;
+
+#define SKIP_NEXT_FRAME 0x100
+
+/* Request Manager -- camera device driver interface */
+/**
+ * @brief: camera kernel drivers to cam req mgr communication
+ *
+ * @cam_req_mgr_notify_trigger: for device which generates trigger to inform CRM
+ * @cam_req_mgr_notify_err : devices use this to inform about different errors
+ * @cam_req_mgr_add_req : to inform CRM about a new request received from
+ * userspace
+ */
+typedef int (*cam_req_mgr_notify_trigger)(
+ struct cam_req_mgr_trigger_notify *);
+typedef int (*cam_req_mgr_notify_err)(struct cam_req_mgr_error_notify *);
+typedef int (*cam_req_mgr_add_req)(struct cam_req_mgr_add_request *);
+
+/**
+ * @brief: cam req mgr to camera device drivers
+ *
+ * @cam_req_mgr_get_dev_info: to fetch details about device linked
+ * @cam_req_mgr_link_setup : to establish link with device for a session
+ * @cam_req_mgr_notify_err : to broadcast error happened on link for request id
+ * @cam_req_mgr_apply_req : CRM asks device to apply certain request id.
+ * @cam_req_mgr_flush_req : Flush or cancel request
+ * @cam_req_mgr_process_evt : generic events
+ */
+typedef int (*cam_req_mgr_get_dev_info) (struct cam_req_mgr_device_info *);
+typedef int (*cam_req_mgr_link_setup)(
+ struct cam_req_mgr_core_dev_link_setup *);
+typedef int (*cam_req_mgr_apply_req)(struct cam_req_mgr_apply_request *);
+typedef int (*cam_req_mgr_flush_req)(struct cam_req_mgr_flush_request *);
+typedef int (*cam_req_mgr_process_evt)(struct cam_req_mgr_link_evt_data *);
+
+/**
+ * @brief : cam_req_mgr_crm_cb - func table
+ *
+ * @notify_trigger : payload for trigger indication event
+ * @notify_err : payload for different error occurred at device
+ * @add_req : payload to inform which device and what request is received
+ */
+struct cam_req_mgr_crm_cb {
+ cam_req_mgr_notify_trigger notify_trigger;
+ cam_req_mgr_notify_err notify_err;
+ cam_req_mgr_add_req add_req;
+};
+
+/**
+ * @brief : cam_req_mgr_kmd_ops - func table
+ *
+ * @get_dev_info : payload to fetch device details
+ * @link_setup : payload to establish link with device
+ * @apply_req : payload to apply request id on a device linked
+ * @flush_req : payload to flush request
+ * @process_evt : payload to generic event
+ */
+struct cam_req_mgr_kmd_ops {
+ cam_req_mgr_get_dev_info get_dev_info;
+ cam_req_mgr_link_setup link_setup;
+ cam_req_mgr_apply_req apply_req;
+ cam_req_mgr_flush_req flush_req;
+ cam_req_mgr_process_evt process_evt;
+};
+
+/**
+ * enum cam_pipeline_delay
+ * @brief : enumerator for different pipeline delays in camera
+ *
+ * @DELAY_0 : device processed settings on same frame
+ * @DELAY_1 : device processed settings after 1 frame
+ * @DELAY_2 : device processed settings after 2 frames
+ * @DELAY_MAX : maximum supported pipeline delay
+ */
+enum cam_pipeline_delay {
+ CAM_PIPELINE_DELAY_0,
+ CAM_PIPELINE_DELAY_1,
+ CAM_PIPELINE_DELAY_2,
+ CAM_PIPELINE_DELAY_MAX,
+};
+
+/**
+ * @CAM_TRIGGER_POINT_SOF : Trigger point for SOF
+ * @CAM_TRIGGER_POINT_EOF : Trigger point for EOF
+ */
+#define CAM_TRIGGER_POINT_SOF (1 << 0)
+#define CAM_TRIGGER_POINT_EOF (1 << 1)
+
+/**
+ * enum cam_req_status
+ * @brief : enumerator for request status
+ *
+ * @SUCCESS : device processed settings successfully
+ * @FAILED : device processed settings failed
+ * @MAX : invalid status value
+ */
+enum cam_req_status {
+ CAM_REQ_STATUS_SUCCESS,
+ CAM_REQ_STATUS_FAILED,
+ CAM_REQ_STATUS_MAX,
+};
+
+/**
+ * enum cam_req_mgr_device_error
+ * @brief : enumerator for different errors occurred at device
+ *
+ * @NOT_FOUND : settings asked by request manager is not found
+ * @BUBBLE : device hit timing issue and is able to recover
+ * @FATAL : device is in bad shape and can not recover from error
+ * @PAGE_FAULT : Page fault while accessing memory
+ * @OVERFLOW : Bus Overflow for IFE/VFE
+ * @TIMEOUT : Timeout from cci or bus.
+ * @MAX : Invalid error value
+ */
+enum cam_req_mgr_device_error {
+ CRM_KMD_ERR_NOT_FOUND,
+ CRM_KMD_ERR_BUBBLE,
+ CRM_KMD_ERR_FATAL,
+ CRM_KMD_ERR_PAGE_FAULT,
+ CRM_KMD_ERR_OVERFLOW,
+ CRM_KMD_ERR_TIMEOUT,
+ CRM_KMD_ERR_MAX,
+};
+
+/**
+ * enum cam_req_mgr_device_id
+ * @brief : enumerator for different devices in subsystem
+ *
+ * @CAM_REQ_MGR : request manager itself
+ * @SENSOR : sensor device
+ * @FLASH : LED flash or dual LED device
+ * @ACTUATOR : lens mover
+ * @IFE : Image processing device
+ * @EXTERNAL_1 : third party device
+ * @EXTERNAL_2 : third party device
+ * @EXTERNAL_3 : third party device
+ * @MAX : invalid device id
+ */
+enum cam_req_mgr_device_id {
+ CAM_REQ_MGR_DEVICE,
+ CAM_REQ_MGR_DEVICE_SENSOR,
+ CAM_REQ_MGR_DEVICE_FLASH,
+ CAM_REQ_MGR_DEVICE_ACTUATOR,
+ CAM_REQ_MGR_DEVICE_IFE,
+ CAM_REQ_MGR_DEVICE_EXTERNAL_1,
+ CAM_REQ_MGR_DEVICE_EXTERNAL_2,
+ CAM_REQ_MGR_DEVICE_EXTERNAL_3,
+ CAM_REQ_MGR_DEVICE_ID_MAX,
+};
+
+/* Camera device driver to Req Mgr device interface */
+
+/**
+ * enum cam_req_mgr_link_evt_type
+ * @CAM_REQ_MGR_LINK_EVT_ERR : error on the link from any of the
+ * connected devices
+ * @CAM_REQ_MGR_LINK_EVT_PAUSE : to pause the link
+ * @CAM_REQ_MGR_LINK_EVT_RESUME : resumes the link which was paused
+ * @CAM_REQ_MGR_LINK_EVT_SOF_FREEZE : request manager has detected an sof freeze
+ * @CAM_REQ_MGR_LINK_EVT_MAX : invalid event type
+ */
+enum cam_req_mgr_link_evt_type {
+ CAM_REQ_MGR_LINK_EVT_ERR,
+ CAM_REQ_MGR_LINK_EVT_PAUSE,
+ CAM_REQ_MGR_LINK_EVT_RESUME,
+ CAM_REQ_MGR_LINK_EVT_SOF_FREEZE,
+ CAM_REQ_MGR_LINK_EVT_MAX,
+};
+
+/**
+ * struct cam_req_mgr_trigger_notify
+ * @link_hdl : link identifier
+ * @dev_hdl : device handle which has sent this req id
+ * @frame_id : frame id for internal tracking
+ * @trigger : trigger point of this notification, CRM will send apply
+ * only to the devices which subscribe to this point.
+ */
+struct cam_req_mgr_trigger_notify {
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ int64_t frame_id;
+ uint32_t trigger;
+};
+
+/**
+ * struct cam_req_mgr_error_notify
+ * @link_hdl : link identifier
+ * @dev_hdl : device handle which has sent this req id
+ * @req_id : req id which hit error
+ * @error : what error device hit while processing this req
+ */
+struct cam_req_mgr_error_notify {
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint64_t req_id;
+ enum cam_req_mgr_device_error error;
+};
+
+/**
+ * struct cam_req_mgr_add_request
+ * @link_hdl : link identifier
+ * @dev_hdl : device handle which has sent this req id
+ * @req_id : req id which device is ready to process
+ * @skip_before_applying : before applying req mgr introduce bubble
+ * by not sending request to device/s.
+ * ex: IFE and Flash
+ */
+struct cam_req_mgr_add_request {
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint64_t req_id;
+ uint32_t skip_before_applying;
+};
+
+
+/* CRM to KMD devices */
+/**
+ * struct cam_req_mgr_device_info
+ * @dev_hdl : Input_param : device handle for reference
+ * @name : name of the device
+ * @dev_id : device id info
+ * @p_delay : delay between time settings applied and take effect
+ * @trigger : Trigger point for the client
+ *
+ */
+struct cam_req_mgr_device_info {
+ int32_t dev_hdl;
+ char name[256];
+ enum cam_req_mgr_device_id dev_id;
+ enum cam_pipeline_delay p_delay;
+ uint32_t trigger;
+};
+
+/**
+ * struct cam_req_mgr_core_dev_link_setup
+ * @link_enable : link link or unlink
+ * @link_hdl : link identifier
+ * @dev_hdl : device handle for reference
+ * @max_delay : max pipeline delay on this link
+ * @crm_cb : callback funcs to communicate with req mgr
+ * @subscribe_event : the mask of trigger points this link subscribes
+ *
+ */
+struct cam_req_mgr_core_dev_link_setup {
+ int32_t link_enable;
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ enum cam_pipeline_delay max_delay;
+ struct cam_req_mgr_crm_cb *crm_cb;
+ uint32_t subscribe_event;
+};
+
+/**
+ * struct cam_req_mgr_apply_request
+ * @link_hdl : link identifier
+ * @dev_hdl : device handle for cross check
+ * @request_id : request id settings to apply
+ * @report_if_bubble : report to crm if failure in applying
+ * @trigger_point : the trigger point of this apply
+ *
+ */
+struct cam_req_mgr_apply_request {
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint64_t request_id;
+ int32_t report_if_bubble;
+ uint32_t trigger_point;
+};
+
+/**
+ * struct cam_req_mgr_flush_request
+ * @link_hdl : link identifier
+ * @dev_hdl : device handle for cross check
+ * @type : cancel request type flush all or a request
+ * @req_id : request id to cancel
+ *
+ */
+struct cam_req_mgr_flush_request {
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint32_t type;
+ uint64_t req_id;
+};
+
+/**
+ * struct cam_req_mgr_link_evt_data
+ * @link_hdl : link handle
+ * @req_id : request id
+ *
+ */
+struct cam_req_mgr_link_evt_data {
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint64_t req_id;
+
+ enum cam_req_mgr_link_evt_type evt_type;
+ union {
+ enum cam_req_mgr_device_error error;
+ } u;
+};
+
+/**
+ * struct cam_req_mgr_send_request
+ * @link_hdl : link identifier
+ * @in_q : pointer to the request queue for this link
+ *
+ */
+struct cam_req_mgr_send_request {
+ int32_t link_hdl;
+ struct cam_req_mgr_req_queue *in_q;
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_timer.c b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_timer.c
new file mode 100644
index 000000000000..28f4dbdab742
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_timer.c
@@ -0,0 +1,99 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_req_mgr_timer.h"
+#include "cam_debug_util.h"
+
+/**
+ * crm_timer_reset() - re-arm @crm_timer so it fires crm_timer->expires
+ * milliseconds from now. No-op when @crm_timer is NULL.
+ */
+void crm_timer_reset(struct cam_req_mgr_timer *crm_timer)
+{
+ if (!crm_timer)
+ return;
+ CAM_DBG(CAM_CRM, "Starting timer to fire in %d ms. (jiffies=%lu)\n",
+ crm_timer->expires, jiffies);
+ mod_timer(&crm_timer->sys_timer,
+ (jiffies + msecs_to_jiffies(crm_timer->expires)));
+}
+
+/**
+ * crm_timer_callback() - default expiry handler installed by
+ * crm_timer_init() when the caller supplies no callback.
+ * @data: the owning cam_req_mgr_timer, cast to unsigned long by
+ *        setup_timer() in crm_timer_init()
+ *
+ * Re-arms the timer so it keeps firing periodically.
+ */
+void crm_timer_callback(unsigned long data)
+{
+ struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+
+ if (!timer) {
+ CAM_ERR(CAM_CRM, "NULL timer");
+ return;
+ }
+ CAM_DBG(CAM_CRM, "timer %pK parent %pK", timer, timer->parent);
+ crm_timer_reset(timer);
+}
+
+/**
+ * crm_timer_modify() - update @crm_timer's timeout to @expires
+ * milliseconds and re-arm it. Safe to call with a NULL timer.
+ */
+void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
+	int32_t expires)
+{
+	CAM_DBG(CAM_CRM, "new time %d", expires);
+	if (!crm_timer)
+		return;
+
+	crm_timer->expires = expires;
+	crm_timer_reset(crm_timer);
+}
+
+/**
+ * crm_timer_init() - allocate and start a new CRM timer.
+ * @timer    : out parameter; must point to a NULL pointer on entry
+ * @expires  : timeout in milliseconds
+ * @parent   : opaque cookie stored in the timer for the callback's use
+ * @timer_cb : optional expiry callback; defaults to crm_timer_callback
+ *
+ * Returns 0 on success, -EINVAL if *timer already exists, -ENOMEM when
+ * the timer cache is absent or allocation fails.
+ */
+int crm_timer_init(struct cam_req_mgr_timer **timer,
+	int32_t expires, void *parent, void (*timer_cb)(unsigned long))
+{
+	struct cam_req_mgr_timer *crm_timer = NULL;
+
+	CAM_DBG(CAM_CRM, "init timer %d %pK", expires, *timer);
+
+	if (*timer != NULL) {
+		CAM_WARN(CAM_CRM, "Timer already exists!!");
+		return -EINVAL;
+	}
+
+	/* The slab cache is created at module init; bail out if absent */
+	if (!g_cam_req_mgr_timer_cachep)
+		return -ENOMEM;
+
+	crm_timer = kmem_cache_alloc(g_cam_req_mgr_timer_cachep,
+		__GFP_ZERO | GFP_KERNEL);
+	if (!crm_timer)
+		return -ENOMEM;
+
+	/* Fall back to the default periodic callback when none given */
+	crm_timer->timer_cb = (timer_cb != NULL) ?
+		timer_cb : crm_timer_callback;
+	crm_timer->expires = expires;
+	crm_timer->parent = parent;
+	setup_timer(&crm_timer->sys_timer,
+		crm_timer->timer_cb, (unsigned long)crm_timer);
+	crm_timer_reset(crm_timer);
+	*timer = crm_timer;
+
+	return 0;
+}
+/**
+ * crm_timer_exit() - stop and free a timer created by crm_timer_init().
+ * @crm_timer: double pointer to the timer; set to NULL on return so the
+ *             caller cannot reuse a freed timer.
+ *
+ * del_timer_sync() guarantees the callback is not running when the
+ * memory is returned to the cache. No-op when *crm_timer is NULL.
+ */
+void crm_timer_exit(struct cam_req_mgr_timer **crm_timer)
+{
+ CAM_DBG(CAM_CRM, "destroy timer %pK @ %pK", *crm_timer, crm_timer);
+ if (*crm_timer) {
+ del_timer_sync(&(*crm_timer)->sys_timer);
+ if (g_cam_req_mgr_timer_cachep)
+ kmem_cache_free(g_cam_req_mgr_timer_cachep, *crm_timer);
+ *crm_timer = NULL;
+ }
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_timer.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_timer.h
new file mode 100644
index 000000000000..b3e473a56daa
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_timer.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_TIMER_H_
+#define _CAM_REQ_MGR_TIMER_H_
+
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "cam_req_mgr_core_defs.h"
+
+/** struct cam_req_mgr_timer
+ * @expires : timeout value for timer in milliseconds
+ *            (converted via msecs_to_jiffies() in crm_timer_reset())
+ * @sys_timer : system timer variable
+ * @parent : priv data - link pointer
+ * @timer_cb : callback func which will be called when timeout expires
+ */
+struct cam_req_mgr_timer {
+ int32_t expires;
+ struct timer_list sys_timer;
+ void *parent;
+ void (*timer_cb)(unsigned long data);
+};
+
+/**
+ * crm_timer_modify()
+ * @brief : allows the user to modify the timer's expiry time.
+ * @timer : timer whose expires value will be updated
+ */
+void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
+ int32_t expires);
+
+/**
+ * crm_timer_reset()
+ * @brief : re-arms the timer to fire again after its expires value.
+ * @timer : timer which will be restarted
+ */
+void crm_timer_reset(struct cam_req_mgr_timer *timer);
+
+/**
+ * crm_timer_init()
+ * @brief : create a new general purpose timer.
+ * timer utility takes care of allocating memory and deleting
+ * @timer : double pointer to new timer allocated
+ * @expires : Timeout value to fire callback
+ * @parent : void pointer which caller can use for book keeping
+ * @timer_cb : caller can chose to use its own callback function when
+ * timer fires the timeout. If no value is set timer util
+ * will use default.
+ */
+int crm_timer_init(struct cam_req_mgr_timer **timer,
+ int32_t expires, void *parent, void (*timer_cb)(unsigned long));
+
+/**
+ * crm_timer_exit()
+ * @brief : destroys the timer allocated.
+ * @timer : timer pointer which will be freed
+ */
+void crm_timer_exit(struct cam_req_mgr_timer **timer);
+
+extern struct kmem_cache *g_cam_req_mgr_timer_cachep;
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util.c
new file mode 100644
index 000000000000..dda04f8e5164
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util.c
@@ -0,0 +1,340 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-REQ-MGR_UTIL %s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_util.h"
+#include "cam_debug_util.h"
+
+static struct cam_req_mgr_util_hdl_tbl *hdl_tbl;
+static DEFINE_SPINLOCK(hdl_tbl_lock);
+
+/**
+ * cam_req_mgr_util_init() - allocate the global handle table and its
+ * allocation bitmap. Called once at probe time.
+ *
+ * Returns 0 on success, -EINVAL/-EEXIST when a table already exists,
+ * -ENOMEM on allocation failure.
+ */
+int cam_req_mgr_util_init(void)
+{
+ int rc = 0;
+ int bitmap_size;
+ /* NOTE(review): 'static' looks unintentional for a local that is
+  * always reassigned before use - confirm original intent.
+  */
+ static struct cam_req_mgr_util_hdl_tbl *hdl_tbl_local;
+
+ if (hdl_tbl) {
+ rc = -EINVAL;
+ CAM_ERR(CAM_CRM, "Hdl_tbl is already present");
+ goto hdl_tbl_check_failed;
+ }
+
+ /* Allocate outside the lock, publish under it; re-check for a race */
+ hdl_tbl_local = kzalloc(sizeof(*hdl_tbl), GFP_KERNEL);
+ if (!hdl_tbl_local) {
+ rc = -ENOMEM;
+ goto hdl_tbl_alloc_failed;
+ }
+ spin_lock_bh(&hdl_tbl_lock);
+ if (hdl_tbl) {
+ spin_unlock_bh(&hdl_tbl_lock);
+ rc = -EEXIST;
+ kfree(hdl_tbl_local);
+ goto hdl_tbl_check_failed;
+ }
+ hdl_tbl = hdl_tbl_local;
+ spin_unlock_bh(&hdl_tbl_lock);
+
+ bitmap_size = BITS_TO_LONGS(CAM_REQ_MGR_MAX_HANDLES) * sizeof(long);
+ hdl_tbl->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!hdl_tbl->bitmap) {
+ rc = -ENOMEM;
+ goto bitmap_alloc_fail;
+ }
+ /* bits tracked may exceed MAX_HANDLES due to long-word rounding */
+ hdl_tbl->bits = bitmap_size * BITS_PER_BYTE;
+
+ return rc;
+
+bitmap_alloc_fail:
+ kfree(hdl_tbl);
+ hdl_tbl = NULL;
+hdl_tbl_alloc_failed:
+hdl_tbl_check_failed:
+ return rc;
+}
+
+/**
+ * cam_req_mgr_util_deinit() - free the handle table and its bitmap,
+ * leaving the module in its pre-init state.
+ *
+ * Returns 0 on success, -EINVAL if the table was never initialized.
+ */
+int cam_req_mgr_util_deinit(void)
+{
+ spin_lock_bh(&hdl_tbl_lock);
+ if (!hdl_tbl) {
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+ spin_unlock_bh(&hdl_tbl_lock);
+ return -EINVAL;
+ }
+
+ kfree(hdl_tbl->bitmap);
+ hdl_tbl->bitmap = NULL;
+ kfree(hdl_tbl);
+ hdl_tbl = NULL;
+ spin_unlock_bh(&hdl_tbl_lock);
+
+ return 0;
+}
+
+/**
+ * cam_req_mgr_util_free_hdls() - force-release every handle still
+ * marked active, logging each leaked handle, then clear the whole
+ * allocation bitmap. Called on device release to drop stale entries.
+ *
+ * Returns 0 on success, -EINVAL if the table is not initialized.
+ */
+int cam_req_mgr_util_free_hdls(void)
+{
+	int i;
+
+	spin_lock_bh(&hdl_tbl_lock);
+	if (!hdl_tbl) {
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+		spin_unlock_bh(&hdl_tbl_lock);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_REQ_MGR_MAX_HANDLES; i++) {
+		if (hdl_tbl->hdl[i].state == HDL_ACTIVE) {
+			CAM_ERR(CAM_CRM, "Dev handle = %x session_handle = %x",
+				hdl_tbl->hdl[i].hdl_value,
+				hdl_tbl->hdl[i].session_hdl);
+			hdl_tbl->hdl[i].state = HDL_FREE;
+		}
+	}
+	/* one-shot clear; per-index clear_bit() here would be redundant */
+	bitmap_zero(hdl_tbl->bitmap, CAM_REQ_MGR_MAX_HANDLES);
+	spin_unlock_bh(&hdl_tbl_lock);
+
+	return 0;
+}
+
+/**
+ * cam_get_free_handle_index() - claim the first free slot in the
+ * handle bitmap. Caller must hold hdl_tbl_lock.
+ *
+ * Returns the claimed index, or -ENOSR when no slot is available.
+ */
+static int32_t cam_get_free_handle_index(void)
+{
+	int free_idx = find_first_zero_bit(hdl_tbl->bitmap, hdl_tbl->bits);
+
+	if (free_idx < 0 || free_idx >= CAM_REQ_MGR_MAX_HANDLES)
+		return -ENOSR;
+
+	set_bit(free_idx, hdl_tbl->bitmap);
+	return free_idx;
+}
+
+/**
+ * cam_create_session_hdl() - allocate a unique session handle and
+ * register @priv against it in the handle table.
+ * @priv: client private data stored with the handle
+ *
+ * The handle packs random bits, the type and the table index (see
+ * GET_DEV_HANDLE()). Returns the handle, -EINVAL when the table is
+ * missing, or a negative error when the table is full.
+ */
+int32_t cam_create_session_hdl(void *priv)
+{
+ int idx;
+ int rand = 0;
+ int32_t handle = 0;
+
+ spin_lock_bh(&hdl_tbl_lock);
+ if (!hdl_tbl) {
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+ spin_unlock_bh(&hdl_tbl_lock);
+ return -EINVAL;
+ }
+
+ idx = cam_get_free_handle_index();
+ if (idx < 0) {
+ CAM_ERR(CAM_CRM, "Unable to create session handle");
+ spin_unlock_bh(&hdl_tbl_lock);
+ return idx;
+ }
+
+ /* only CAM_REQ_MGR_RND1_BYTES of 'rand' are randomized */
+ get_random_bytes(&rand, CAM_REQ_MGR_RND1_BYTES);
+ handle = GET_DEV_HANDLE(rand, HDL_TYPE_SESSION, idx);
+ hdl_tbl->hdl[idx].session_hdl = handle;
+ hdl_tbl->hdl[idx].hdl_value = handle;
+ hdl_tbl->hdl[idx].type = HDL_TYPE_SESSION;
+ hdl_tbl->hdl[idx].state = HDL_ACTIVE;
+ hdl_tbl->hdl[idx].priv = priv;
+ hdl_tbl->hdl[idx].ops = NULL;
+ spin_unlock_bh(&hdl_tbl_lock);
+
+ return handle;
+}
+
+/**
+ * cam_create_device_hdl() - allocate a device/link handle bound to the
+ * session in @hdl_data and register its ops/priv in the handle table.
+ * @hdl_data: session handle, ops and private data for the new handle
+ *
+ * Returns the new handle, -EINVAL when the table is missing, or a
+ * negative error from cam_get_free_handle_index() when full.
+ */
+int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data)
+{
+	int idx;
+	int rand = 0;
+	int32_t handle;
+
+	spin_lock_bh(&hdl_tbl_lock);
+	if (!hdl_tbl) {
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+		spin_unlock_bh(&hdl_tbl_lock);
+		return -EINVAL;
+	}
+
+	idx = cam_get_free_handle_index();
+	if (idx < 0) {
+		CAM_ERR(CAM_CRM, "Unable to create device handle");
+		spin_unlock_bh(&hdl_tbl_lock);
+		return idx;
+	}
+
+	get_random_bytes(&rand, CAM_REQ_MGR_RND1_BYTES);
+	handle = GET_DEV_HANDLE(rand, HDL_TYPE_DEV, idx);
+	hdl_tbl->hdl[idx].session_hdl = hdl_data->session_hdl;
+	hdl_tbl->hdl[idx].hdl_value = handle;
+	hdl_tbl->hdl[idx].type = HDL_TYPE_DEV;
+	hdl_tbl->hdl[idx].state = HDL_ACTIVE;
+	hdl_tbl->hdl[idx].priv = hdl_data->priv;
+	hdl_tbl->hdl[idx].ops = hdl_data->ops;
+	spin_unlock_bh(&hdl_tbl_lock);
+
+	/* CAM_DBG for consistency with the rest of this file (was pr_debug) */
+	CAM_DBG(CAM_CRM, "handle = %x", handle);
+	return handle;
+}
+
+/**
+ * cam_get_device_priv() - look up the private data registered for
+ * @dev_hdl (session or device handle).
+ *
+ * Validates index, state, type and full handle value before returning;
+ * returns NULL on any mismatch. Rate-limited logging because callers
+ * may probe handles on hot paths.
+ */
+void *cam_get_device_priv(int32_t dev_hdl)
+{
+ int idx;
+ int type;
+ void *priv;
+
+ spin_lock_bh(&hdl_tbl_lock);
+ if (!hdl_tbl) {
+ CAM_ERR_RATE_LIMIT(CAM_CRM, "Hdl tbl is NULL");
+ goto device_priv_fail;
+ }
+
+ idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
+ if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
+ CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid idx");
+ goto device_priv_fail;
+ }
+
+ if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
+ CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid state");
+ goto device_priv_fail;
+ }
+
+ type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
+ if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
+ CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid type");
+ goto device_priv_fail;
+ }
+
+ /* full-value compare rejects stale handles reusing the same idx */
+ if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
+ CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid hdl");
+ goto device_priv_fail;
+ }
+
+ priv = hdl_tbl->hdl[idx].priv;
+ spin_unlock_bh(&hdl_tbl_lock);
+
+ return priv;
+
+device_priv_fail:
+ spin_unlock_bh(&hdl_tbl_lock);
+ return NULL;
+}
+
+/**
+ * cam_get_device_ops() - look up the ops pointer registered for
+ * @dev_hdl (session or device handle).
+ *
+ * Same validation chain as cam_get_device_priv(); returns NULL on any
+ * mismatch.
+ */
+void *cam_get_device_ops(int32_t dev_hdl)
+{
+ int idx;
+ int type;
+ void *ops;
+
+ spin_lock_bh(&hdl_tbl_lock);
+ if (!hdl_tbl) {
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+ goto device_ops_fail;
+ }
+
+ idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
+ if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
+ CAM_ERR(CAM_CRM, "Invalid idx");
+ goto device_ops_fail;
+ }
+
+ if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
+ CAM_ERR(CAM_CRM, "Invalid state");
+ goto device_ops_fail;
+ }
+
+ type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
+ if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
+ CAM_ERR(CAM_CRM, "Invalid type");
+ goto device_ops_fail;
+ }
+
+ if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
+ CAM_ERR(CAM_CRM, "Invalid hdl");
+ goto device_ops_fail;
+ }
+
+ ops = hdl_tbl->hdl[idx].ops;
+ spin_unlock_bh(&hdl_tbl_lock);
+
+ return ops;
+
+device_ops_fail:
+ spin_unlock_bh(&hdl_tbl_lock);
+ return NULL;
+}
+
+/**
+ * cam_destroy_hdl() - validate and release one handle.
+ * @dev_hdl      : handle to destroy
+ * @dev_hdl_type : expected type (HDL_TYPE_DEV or HDL_TYPE_SESSION);
+ *                 prevents destroying a session via the device API and
+ *                 vice versa
+ *
+ * Returns 0 on success, -EINVAL on any validation failure.
+ */
+static int cam_destroy_hdl(int32_t dev_hdl, int dev_hdl_type)
+{
+ int idx;
+ int type;
+
+ spin_lock_bh(&hdl_tbl_lock);
+ if (!hdl_tbl) {
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+ goto destroy_hdl_fail;
+ }
+
+ idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
+ if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
+ CAM_ERR(CAM_CRM, "Invalid idx %d", idx);
+ goto destroy_hdl_fail;
+ }
+
+ if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
+ CAM_ERR(CAM_CRM, "Invalid state");
+ goto destroy_hdl_fail;
+ }
+
+ type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
+ if (type != dev_hdl_type) {
+ CAM_ERR(CAM_CRM, "Invalid type %d, %d", type, dev_hdl_type);
+ goto destroy_hdl_fail;
+ }
+
+ if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
+ CAM_ERR(CAM_CRM, "Invalid hdl");
+ goto destroy_hdl_fail;
+ }
+
+ hdl_tbl->hdl[idx].state = HDL_FREE;
+ hdl_tbl->hdl[idx].ops = NULL;
+ hdl_tbl->hdl[idx].priv = NULL;
+ clear_bit(idx, hdl_tbl->bitmap);
+ spin_unlock_bh(&hdl_tbl_lock);
+
+ return 0;
+
+destroy_hdl_fail:
+ spin_unlock_bh(&hdl_tbl_lock);
+ return -EINVAL;
+}
+
+/* Release a device/link handle created by cam_create_device_hdl() */
+int cam_destroy_device_hdl(int32_t dev_hdl)
+{
+ return cam_destroy_hdl(dev_hdl, HDL_TYPE_DEV);
+}
+
+/* Release a session handle created by cam_create_session_hdl() */
+int cam_destroy_session_hdl(int32_t dev_hdl)
+{
+ return cam_destroy_hdl(dev_hdl, HDL_TYPE_SESSION);
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util.h
new file mode 100644
index 000000000000..7b8e3e601ed8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util.h
@@ -0,0 +1,172 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_UTIL_API_H_
+#define _CAM_REQ_MGR_UTIL_API_H_
+
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_util_priv.h"
+
+/**
+ * state of a handle(session/device)
+ * @HDL_FREE: free handle
+ * @HDL_ACTIVE: active handles
+ */
+enum hdl_state {
+ HDL_FREE,
+ HDL_ACTIVE
+};
+
+/**
+ * handle type
+ * @HDL_TYPE_DEV: for device and link
+ * @HDL_TYPE_SESSION: for session
+ */
+enum hdl_type {
+ HDL_TYPE_DEV = 1,
+ HDL_TYPE_SESSION
+};
+
+/**
+ * struct handle - one row of the handle table
+ * @session_hdl: session handle this entry belongs to
+ * @hdl_value: allocated handle value (random bits | type | index)
+ * @type: session/device handle
+ * @state: free/used
+ * @ops: ops structure registered by the owner
+ * @priv: private data of a handle
+ */
+struct handle {
+ int32_t session_hdl;
+ uint32_t hdl_value;
+ enum hdl_type type;
+ enum hdl_state state;
+ void *ops;
+ void *priv;
+};
+
+/**
+ * struct cam_req_mgr_util_hdl_tbl - global handle table
+ * @hdl: row of handles
+ * @bitmap: bit map to get free hdl row idx
+ * @bits: size of bit map in bits
+ */
+struct cam_req_mgr_util_hdl_tbl {
+ struct handle hdl[CAM_REQ_MGR_MAX_HANDLES];
+ void *bitmap;
+ size_t bits;
+};
+
+/**
+ * struct cam_create_dev_hdl - input for creating a device handle
+ * @session_hdl: session_hdl info
+ * @v4l2_sub_dev_flag: flag to create v4l2 sub device
+ * @media_entity_flag: flag for media entity
+ * @reserved: reserved field
+ * @ops: ops pointer for a device handle
+ * @priv: private data for a device handle
+ */
+struct cam_create_dev_hdl {
+ int32_t session_hdl;
+ int32_t v4l2_sub_dev_flag;
+ int32_t media_entity_flag;
+ int32_t reserved;
+ void *ops;
+ void *priv;
+};
+
+/**
+ * cam_create_session_hdl() - create a session handle
+ * @priv: private data for a session handle
+ *
+ * cam_req_mgr core calls this function to get
+ * a unique session handle. Returns a unique session
+ * handle
+ */
+int32_t cam_create_session_hdl(void *priv);
+
+/**
+ * cam_create_device_hdl() - create a device handle
+ * @hdl_data: session hdl, flags, ops and priv data as input
+ *
+ * cam_req_mgr_core calls this function to get
+ * session and link handles
+ * KMD drivers calls this function to create
+ * a device handle. Returns a unique device handle
+ */
+int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data);
+
+/**
+ * cam_get_device_priv() - get private data of a handle
+ * @dev_hdl: handle for a session/link/device
+ *
+ * cam_req_mgr_core and KMD drivers use this function to
+ * get private data of a handle. Returns a private data
+ * structure pointer.
+ */
+void *cam_get_device_priv(int32_t dev_hdl);
+
+/**
+ * cam_get_device_ops() - get ops of a handle
+ * @dev_hdl: handle for a session/link/device
+ *
+ * cam_req_mgr_core and KMD drivers use this function to
+ * get ops of a handle. Returns a pointer to ops.
+ */
+void *cam_get_device_ops(int32_t dev_hdl);
+
+/**
+ * cam_destroy_device_hdl() - destroy device handle
+ * @dev_hdl: handle for a link/device.
+ *
+ * Returns success/failure
+ */
+int32_t cam_destroy_device_hdl(int32_t dev_hdl);
+
+/**
+ * cam_destroy_session_hdl() - destroy device handle
+ * @dev_hdl: handle for a session
+ *
+ * Returns success/failure
+ */
+int32_t cam_destroy_session_hdl(int32_t dev_hdl);
+
+
+/* Internal functions */
+/**
+ * cam_req_mgr_util_init() - init function of cam_req_mgr_util
+ *
+ * This is called as part of probe function to initialize
+ * handle table, bitmap, locks
+ */
+int cam_req_mgr_util_init(void);
+
+/**
+ * cam_req_mgr_util_deinit() - deinit function of cam_req_mgr_util
+ *
+ * This function is called in case of probe failure
+ */
+int32_t cam_req_mgr_util_deinit(void);
+
+/**
+ * cam_req_mgr_util_free_hdls() - free handles in case of crash
+ *
+ * Called from cam_req_mgr_dev release function to make sure
+ * all data structures are cleaned to avoid leaks
+ *
+ * cam_req_mgr core can call this function at the end of
+ * camera to make sure all stale entries are printed and
+ * cleaned
+ */
+int32_t cam_req_mgr_util_free_hdls(void);
+
+#endif /* _CAM_REQ_MGR_UTIL_API_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util_priv.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util_priv.h
new file mode 100644
index 000000000000..a68c568c4f28
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_util_priv.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_UTIL_PRIV_H_
+#define _CAM_REQ_MGR_UTIL_PRIV_H_
+
+/**
+ * handle format:
+ * @bits (0-7): handle index
+ * @bits (8-11): handle type
+ * @bits (12-15): reserved
+ * @bits (16-23): random bits
+ * @bits (24-31): zeros
+ */
+
+#define CAM_REQ_MGR_HDL_SIZE 32
+#define CAM_REQ_MGR_RND1_SIZE 8
+#define CAM_REQ_MGR_RVD_SIZE 4
+#define CAM_REQ_MGR_HDL_TYPE_SIZE 4
+#define CAM_REQ_MGR_HDL_IDX_SIZE 8
+
+#define CAM_REQ_MGR_RND1_POS 24
+#define CAM_REQ_MGR_RVD_POS 16
+#define CAM_REQ_MGR_HDL_TYPE_POS 12
+
+#define CAM_REQ_MGR_RND1_BYTES 1
+
+#define CAM_REQ_MGR_HDL_TYPE_MASK ((1 << CAM_REQ_MGR_HDL_TYPE_SIZE) - 1)
+
+/* Pack random bits, handle type and table index into one 32-bit handle */
+#define GET_DEV_HANDLE(rnd1, type, idx) \
+	((rnd1 << (CAM_REQ_MGR_RND1_POS - CAM_REQ_MGR_RND1_SIZE)) | \
+	(0x0 << (CAM_REQ_MGR_RVD_POS - CAM_REQ_MGR_RVD_SIZE)) | \
+	(type << (CAM_REQ_MGR_HDL_TYPE_POS - CAM_REQ_MGR_HDL_TYPE_SIZE)) | \
+	(idx << (CAM_REQ_MGR_HDL_IDX_POS - CAM_REQ_MGR_HDL_IDX_SIZE)))
+
+#define CAM_REQ_MGR_GET_HDL_IDX(hdl) (hdl & CAM_REQ_MGR_HDL_IDX_MASK)
+#define CAM_REQ_MGR_GET_HDL_TYPE(hdl) \
+ ((hdl >> CAM_REQ_MGR_HDL_IDX_POS) & CAM_REQ_MGR_HDL_TYPE_MASK)
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_workq.c
new file mode 100644
index 000000000000..12bc3ac47a8e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_workq.c
@@ -0,0 +1,277 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_req_mgr_workq.h"
+#include "cam_debug_util.h"
+
+#define WORKQ_ACQUIRE_LOCK(workq, flags) {\
+ if ((workq)->in_irq) \
+ spin_lock_irqsave(&(workq)->lock_bh, (flags)); \
+ else \
+ spin_lock_bh(&(workq)->lock_bh); \
+}
+
+#define WORKQ_RELEASE_LOCK(workq, flags) {\
+ if ((workq)->in_irq) \
+ spin_unlock_irqrestore(&(workq)->lock_bh, (flags)); \
+ else \
+ spin_unlock_bh(&(workq)->lock_bh); \
+}
+
+/**
+ * cam_req_mgr_workq_get_task() - take one task from the workq's free
+ * pool under the workq lock.
+ * @workq: workq to take from
+ *
+ * Returns the task, or NULL when @workq is NULL or the pool is empty.
+ */
+struct crm_workq_task *cam_req_mgr_workq_get_task(
+ struct cam_req_mgr_core_workq *workq)
+{
+ struct crm_workq_task *task = NULL;
+ unsigned long flags = 0;
+
+ if (!workq)
+ return NULL;
+
+ WORKQ_ACQUIRE_LOCK(workq, flags);
+ if (list_empty(&workq->task.empty_head))
+ goto end;
+
+ task = list_first_entry(&workq->task.empty_head,
+ struct crm_workq_task, entry);
+ if (task) {
+ atomic_sub(1, &workq->task.free_cnt);
+ list_del_init(&task->entry);
+ }
+
+end:
+ WORKQ_RELEASE_LOCK(workq, flags);
+
+ return task;
+}
+
+/**
+ * cam_req_mgr_workq_put_task() - reset @task's per-use fields and
+ * return it to its parent workq's free pool.
+ * @task: task to recycle; task->parent must point at its workq
+ */
+static void cam_req_mgr_workq_put_task(struct crm_workq_task *task)
+{
+ struct cam_req_mgr_core_workq *workq =
+ (struct cam_req_mgr_core_workq *)task->parent;
+ unsigned long flags = 0;
+
+ /* detach before clearing so the entry is safe to re-list below */
+ list_del_init(&task->entry);
+ task->cancel = 0;
+ task->process_cb = NULL;
+ task->priv = NULL;
+ WORKQ_ACQUIRE_LOCK(workq, flags);
+ list_add_tail(&task->entry,
+ &workq->task.empty_head);
+ atomic_add(1, &workq->task.free_cnt);
+ WORKQ_RELEASE_LOCK(workq, flags);
+}
+
+/**
+ * cam_req_mgr_process_task() - invoke one dequeued task's callback and
+ * return the task to its workq's free pool.
+ * @task: task to process
+ *
+ * Returns 0 on success, -EINVAL for a NULL task.
+ */
+static int cam_req_mgr_process_task(struct crm_workq_task *task)
+{
+	if (!task)
+		return -EINVAL;
+
+	if (task->process_cb)
+		task->process_cb(task->priv, task->payload);
+	else
+		CAM_WARN(CAM_CRM, "FATAL:no task handler registered for workq");
+
+	/* removed unused local 'workq' that shadowed task->parent */
+	cam_req_mgr_workq_put_task(task);
+
+	return 0;
+}
+
+/**
+ * cam_req_mgr_process_workq() - workqueue handler; drains every pending
+ * task in priority order (CRM_TASK_PRIORITY_0 first).
+ * @w: embedded work_struct of the owning cam_req_mgr_core_workq
+ *
+ * The lock is dropped around each task callback so callbacks may
+ * enqueue further tasks without deadlocking.
+ */
+static void cam_req_mgr_process_workq(struct work_struct *w)
+{
+ struct cam_req_mgr_core_workq *workq = NULL;
+ struct crm_workq_task *task;
+ int32_t i = CRM_TASK_PRIORITY_0;
+ unsigned long flags = 0;
+
+ if (!w) {
+ CAM_ERR(CAM_CRM, "NULL task pointer can not schedule");
+ return;
+ }
+ workq = (struct cam_req_mgr_core_workq *)
+ container_of(w, struct cam_req_mgr_core_workq, work);
+
+ while (i < CRM_TASK_PRIORITY_MAX) {
+ WORKQ_ACQUIRE_LOCK(workq, flags);
+ while (!list_empty(&workq->task.process_head[i])) {
+ task = list_first_entry(&workq->task.process_head[i],
+ struct crm_workq_task, entry);
+ atomic_sub(1, &workq->task.pending_cnt);
+ list_del_init(&task->entry);
+ WORKQ_RELEASE_LOCK(workq, flags);
+ cam_req_mgr_process_task(task);
+ CAM_DBG(CAM_CRM, "processed task %pK free_cnt %d",
+ task, atomic_read(&workq->task.free_cnt));
+ WORKQ_ACQUIRE_LOCK(workq, flags);
+ }
+ WORKQ_RELEASE_LOCK(workq, flags);
+ i++;
+ }
+}
+
+/**
+ * cam_req_mgr_workq_enqueue_task() - queue @task on its workq at the
+ * requested priority and kick the workqueue.
+ * @task: task obtained from cam_req_mgr_workq_get_task()
+ * @priv: client data handed to the process callback
+ * @prio: priority list to queue on; out-of-range values fall back to
+ *        CRM_TASK_PRIORITY_0
+ *
+ * A task with ->cancel set is returned to the pool unprocessed (rc 0).
+ * Returns -EINVAL for NULL task/workq or when the workqueue has been
+ * torn down (workq->job == NULL).
+ */
+int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
+ void *priv, int32_t prio)
+{
+ int rc = 0;
+ struct cam_req_mgr_core_workq *workq = NULL;
+ unsigned long flags = 0;
+
+ if (!task) {
+ CAM_WARN(CAM_CRM, "NULL task pointer can not schedule");
+ rc = -EINVAL;
+ goto end;
+ }
+ workq = (struct cam_req_mgr_core_workq *)task->parent;
+ if (!workq) {
+ CAM_DBG(CAM_CRM, "NULL workq pointer suspect mem corruption");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (task->cancel == 1) {
+ cam_req_mgr_workq_put_task(task);
+ CAM_WARN(CAM_CRM, "task aborted and queued back to pool");
+ rc = 0;
+ goto end;
+ }
+ task->priv = priv;
+ task->priority =
+ (prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0)
+ ? prio : CRM_TASK_PRIORITY_0;
+
+ WORKQ_ACQUIRE_LOCK(workq, flags);
+ /* job is NULLed by workq_destroy(); reject enqueue after teardown */
+ if (!workq->job) {
+ rc = -EINVAL;
+ WORKQ_RELEASE_LOCK(workq, flags);
+ goto end;
+ }
+
+ list_add_tail(&task->entry,
+ &workq->task.process_head[task->priority]);
+
+ atomic_add(1, &workq->task.pending_cnt);
+ CAM_DBG(CAM_CRM, "enq task %pK pending_cnt %d",
+ task, atomic_read(&workq->task.pending_cnt));
+
+ queue_work(workq->job, &workq->work);
+ WORKQ_RELEASE_LOCK(workq, flags);
+end:
+ return rc;
+}
+
+/**
+ * cam_req_mgr_workq_create() - allocate a CRM workq with an underlying
+ * kernel workqueue and a pool of @num_tasks reusable tasks.
+ * @name      : suffix appended to "crm_workq-" for the workqueue name
+ * @num_tasks : number of tasks to pre-allocate in the free pool
+ * @workq     : out parameter; only filled when *workq is NULL on entry
+ * @in_irq    : CRM_WORKQ_USAGE_IRQ if used from IRQ context
+ * @flags     : CAM_WORKQ_FLAG_HIGH_PRIORITY and/or CAM_WORKQ_FLAG_SERIAL
+ *
+ * Returns 0 on success (also when *workq already exists), -ENOMEM on
+ * allocation failure.
+ */
+int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
+	struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq,
+	int flags)
+{
+	int32_t i, wq_flags = 0, max_active_tasks = 0;
+	struct crm_workq_task *task;
+	struct cam_req_mgr_core_workq *crm_workq = NULL;
+	char buf[128] = "crm_workq-";
+
+	if (!*workq) {
+		crm_workq = kzalloc(sizeof(struct cam_req_mgr_core_workq),
+			GFP_KERNEL);
+		if (crm_workq == NULL)
+			return -ENOMEM;
+
+		wq_flags |= WQ_UNBOUND;
+		if (flags & CAM_WORKQ_FLAG_HIGH_PRIORITY)
+			wq_flags |= WQ_HIGHPRI;
+
+		/* SERIAL limits the wq to one in-flight work item */
+		if (flags & CAM_WORKQ_FLAG_SERIAL)
+			max_active_tasks = 1;
+
+		strlcat(buf, name, sizeof(buf));
+		CAM_DBG(CAM_CRM, "create workque crm_workq-%s", name);
+		crm_workq->job = alloc_workqueue(buf,
+			wq_flags, max_active_tasks, NULL);
+		if (!crm_workq->job) {
+			kfree(crm_workq);
+			return -ENOMEM;
+		}
+
+		/* Workq attributes initialization */
+		INIT_WORK(&crm_workq->work, cam_req_mgr_process_workq);
+		spin_lock_init(&crm_workq->lock_bh);
+		CAM_DBG(CAM_CRM, "LOCK_DBG workq %s lock %pK",
+			name, &crm_workq->lock_bh);
+
+		/* Task attributes initialization */
+		atomic_set(&crm_workq->task.pending_cnt, 0);
+		atomic_set(&crm_workq->task.free_cnt, 0);
+		for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++)
+			INIT_LIST_HEAD(&crm_workq->task.process_head[i]);
+		INIT_LIST_HEAD(&crm_workq->task.empty_head);
+		crm_workq->in_irq = in_irq;
+		crm_workq->task.num_task = num_tasks;
+		/* kcalloc checks num * size for overflow and zeroes the pool */
+		crm_workq->task.pool = kcalloc(crm_workq->task.num_task,
+			sizeof(struct crm_workq_task), GFP_KERNEL);
+		if (!crm_workq->task.pool) {
+			CAM_WARN(CAM_CRM, "Insufficient memory %zu",
+				sizeof(struct crm_workq_task) *
+				crm_workq->task.num_task);
+			/* fix: don't leak the workqueue allocated above */
+			destroy_workqueue(crm_workq->job);
+			kfree(crm_workq);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < crm_workq->task.num_task; i++) {
+			task = &crm_workq->task.pool[i];
+			task->parent = (void *)crm_workq;
+			/* Put all tasks in free pool */
+			INIT_LIST_HEAD(&task->entry);
+			cam_req_mgr_workq_put_task(task);
+		}
+		*workq = crm_workq;
+		CAM_DBG(CAM_CRM, "free tasks %d",
+			atomic_read(&crm_workq->task.free_cnt));
+	}
+
+	return 0;
+}
+
+/**
+ * cam_req_mgr_workq_destroy() - tear down a workq created by
+ * cam_req_mgr_workq_create() and free its task pool.
+ * @crm_workq: double pointer to the workq; set to NULL on return.
+ *
+ * job is NULLed under the lock before destroy_workqueue() so
+ * concurrent enqueue_task() calls fail cleanly instead of queueing
+ * onto a dying workqueue.
+ */
+void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
+{
+ unsigned long flags = 0;
+ struct workqueue_struct *job;
+
+ CAM_DBG(CAM_CRM, "destroy workque %pK", crm_workq);
+ if (*crm_workq) {
+ WORKQ_ACQUIRE_LOCK(*crm_workq, flags);
+ if ((*crm_workq)->job) {
+ job = (*crm_workq)->job;
+ (*crm_workq)->job = NULL;
+ WORKQ_RELEASE_LOCK(*crm_workq, flags);
+ destroy_workqueue(job);
+ } else {
+ WORKQ_RELEASE_LOCK(*crm_workq, flags);
+ }
+
+ /* Destroy workq payload data */
+ /* NOTE(review): only pool[0].payload is freed - assumes a single
+  * shared payload allocation owned by slot 0; confirm with callers.
+  */
+ kfree((*crm_workq)->task.pool[0].payload);
+ (*crm_workq)->task.pool[0].payload = NULL;
+ kfree((*crm_workq)->task.pool);
+ kfree(*crm_workq);
+ *crm_workq = NULL;
+ }
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_workq.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_workq.h
new file mode 100644
index 000000000000..af76ae467346
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_req_mgr_workq.h
@@ -0,0 +1,150 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_REQ_MGR_WORKQ_H_
+#define _CAM_REQ_MGR_WORKQ_H_
+
+#include<linux/kernel.h>
+#include<linux/module.h>
+#include<linux/init.h>
+#include<linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "cam_req_mgr_core.h"
+
+/* Flag to create a high priority workq */
+#define CAM_WORKQ_FLAG_HIGH_PRIORITY (1 << 0)
+
+/* This flag ensures only one task from a given
+ * workq will execute at any given point on any
+ * given CPU.
+ */
+#define CAM_WORKQ_FLAG_SERIAL (1 << 1)
+
+/* Task priorities, lower the number higher the priority*/
+enum crm_task_priority {
+ CRM_TASK_PRIORITY_0,
+ CRM_TASK_PRIORITY_1,
+ CRM_TASK_PRIORITY_MAX,
+};
+
+/* workqueue will be used from irq context or not */
+enum crm_workq_context {
+ CRM_WORKQ_USAGE_NON_IRQ,
+ CRM_WORKQ_USAGE_IRQ,
+ CRM_WORKQ_USAGE_INVALID,
+};
+
+/** struct crm_workq_task
+ * @priority : caller can assign priority to task based on type.
+ * @payload : depending on the user of the task this payload type will change
+ * @process_cb : registered callback called by workq when task enqueued is
+ * ready for processing in workq thread context
+ * @parent : workq's parent is the link which is enqueuing tasks to this workq
+ * @entry : list head of this list entry is worker's empty_head
+ * @cancel : if caller has got free task from pool but wants to abort
+ * or put back without using it
+ * @priv : when task is enqueued caller can attach priv along which
+ * it will get in process callback
+ * @ret : return value in future to use for blocking calls
+ */
+struct crm_workq_task {
+ int32_t priority;
+ void *payload;
+ int32_t (*process_cb)(void *, void *);
+ void *parent;
+ struct list_head entry;
+ uint8_t cancel;
+ void *priv;
+ int32_t ret;
+};
+
+/** struct cam_req_mgr_core_workq
+ * @work : work token used by workqueue
+ * @job : workqueue internal job struct
+ * task -
+ * @lock_bh : lock for task structs
+ *            (NOTE(review): task.lock mutex below appears unused in
+ *            cam_req_mgr_workq.c - confirm before removing)
+ * @in_irq : set true if workque can be used in irq context
+ * @free_cnt : num of free/available tasks
+ * @empty_head : list head of available tasks which can be used
+ * or acquired in order to enqueue a task to workq
+ * @pool : pool of tasks used for handling events in workq context
+ * @num_task : size of tasks pool
+ * -
+ */
+struct cam_req_mgr_core_workq {
+ struct work_struct work;
+ struct workqueue_struct *job;
+ spinlock_t lock_bh;
+ uint32_t in_irq;
+
+ /* tasks */
+ struct {
+ struct mutex lock;
+ atomic_t pending_cnt;
+ atomic_t free_cnt;
+
+ struct list_head process_head[CRM_TASK_PRIORITY_MAX];
+ struct list_head empty_head;
+ struct crm_workq_task *pool;
+ uint32_t num_task;
+ } task;
+};
+
+/**
+ * cam_req_mgr_workq_create()
+ * @brief : create a workqueue
+ * @name : Name of the workque to be allocated, it is combination
+ * of session handle and link handle
+ * @num_task : Num_tasks to be allocated for workq
+ * @workq : Double pointer worker
+ * @in_irq : Set to one if workq might be used in irq context
+ * @flags : Bitwise OR of Flags for workq behavior.
+ * e.g. CAM_REQ_MGR_WORKQ_HIGH_PRIORITY | CAM_REQ_MGR_WORKQ_SERIAL
+ * This function will allocate and create workqueue and pass
+ * the workq pointer to caller.
+ */
+int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
+ struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq,
+ int flags);
+
+/**
+ * cam_req_mgr_workq_destroy()
+ * @brief: destroy workqueue
+ * @workq: pointer to worker data struct
+ * this function will destroy workqueue and clean up resources
+ * associated with worker such as tasks.
+ */
+void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **workq);
+
+/**
+ * cam_req_mgr_workq_enqueue_task()
+ * @brief: Enqueue task in worker queue
+ * @task : task to be processed by worker
+ * @priv : clients private data
+ * @prio : task priority
+ * process callback func
+ */
+int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
+ void *priv, int32_t prio);
+
+/**
+ * cam_req_mgr_workq_get_task()
+ * @brief: Returns empty task pointer for use
+ * @workq: workque used for processing
+ */
+struct crm_workq_task *cam_req_mgr_workq_get_task(
+ struct cam_req_mgr_core_workq *workq);
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_subdev.h b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_subdev.h
new file mode 100644
index 000000000000..c9fa904e64af
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_req_mgr/cam_subdev.h
@@ -0,0 +1,115 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SUBDEV_H_
+#define _CAM_SUBDEV_H_
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#define CAM_SUBDEVICE_EVENT_MAX 30
+
+/**
+ * struct cam_subdev - describes a camera sub-device
+ *
+ * @pdev:                  Pointer to the platform device
+ * @sd:                    V4l2 subdevice
+ * @ops:                   V4l2 subdevice operations
+ * @internal_ops:          V4l2 subdevice internal operations
+ * @name:                  Name of the sub-device. Please notice that the name
+ *                             must be unique.
+ * @sd_flags:              Subdev flags. Can be:
+ *                             %V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if
+ *                                 this subdev needs a device node.
+ *                             %V4L2_SUBDEV_FL_HAS_EVENTS -  Set this flag if
+ *                                 this subdev generates events.
+ * @token:                 Pointer to cookie of the client driver
+ * @ent_function:          Media entity function type. Can be:
+ *                             %CAM_IFE_DEVICE_TYPE - identifies as IFE device.
+ *                             %CAM_ICP_DEVICE_TYPE - identifies as ICP device.
+ *
+ * Each instance of a subdev driver should create this struct, either
+ * stand-alone or embedded in a larger struct. This structure should be
+ * initialized/registered by cam_register_subdev
+ *
+ */
+struct cam_subdev {
+	struct platform_device *pdev;
+	struct v4l2_subdev sd;
+	const struct v4l2_subdev_ops *ops;
+	const struct v4l2_subdev_internal_ops *internal_ops;
+	char *name;
+	u32 sd_flags;
+	void *token;
+	u32 ent_function;
+};
+
+/**
+ * cam_subdev_probe()
+ *
+ * @brief:      Camera Subdevice node probe function for v4l2 setup
+ *
+ * @sd:         Camera subdevice object
+ * @pdev:       Platform device the subdevice node is bound to
+ * @name:       Name of the subdevice node
+ * @dev_type:   Subdevice node type
+ *
+ */
+int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
+	char *name, uint32_t dev_type);
+
+/**
+ * cam_subdev_remove()
+ *
+ * @brief: Called when subdevice node is unloaded
+ *
+ * @sd: Camera subdevice node object
+ *
+ */
+int cam_subdev_remove(struct cam_subdev *sd);
+
+/**
+ * cam_register_subdev_fops()
+ *
+ * @brief: This common utility function assigns subdev ops
+ *
+ * @fops: v4l file operations
+ */
+void cam_register_subdev_fops(struct v4l2_file_operations *fops);
+
+/**
+ * cam_register_subdev()
+ *
+ * @brief: This is the common utility function to be called by each camera
+ * subdevice node when it tries to register itself to the camera
+ * request manager
+ *
+ * @sd: Pointer to struct cam_subdev.
+ */
+int cam_register_subdev(struct cam_subdev *sd);
+
+/**
+ * cam_unregister_subdev()
+ *
+ * @brief:   This is the common utility function to be called by each camera
+ *           subdevice node when it tries to unregister itself from the
+ *           camera request manager
+ *
+ * @sd:      Pointer to struct cam_subdev.
+ */
+int cam_unregister_subdev(struct cam_subdev *sd);
+
+#endif /* _CAM_SUBDEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/Makefile
new file mode 100644
index 000000000000..65c23274e5ae
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_res_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_utils/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ois/
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/Makefile
new file mode 100644
index 000000000000..a350da70cc6d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator_dev.o cam_actuator_core.o cam_actuator_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_core.c
new file mode 100644
index 000000000000..b2a7dc0784f4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -0,0 +1,958 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <cam_sensor_cmn_header.h>
+#include "cam_actuator_core.h"
+#include "cam_sensor_util.h"
+#include "cam_trace.h"
+#include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
+
+/**
+ * cam_actuator_construct_default_power_setting()
+ * @power_info: power setting info to control the power
+ *
+ * Constructs the default actuator power sequence when userspace supplied
+ * none: a single SENSOR_VAF/CAM_VAF entry for power up (enabled, 2ms
+ * delay) and one for power down (disabled).
+ *
+ * @return: Zero on success, -ENOMEM on allocation failure. On failure the
+ *          partially-built power up setting is freed and zeroed.
+ */
+int32_t cam_actuator_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0;
+
+	power_info->power_setting_size = 1;
+	/* kzalloc returns void *; no cast is needed in C */
+	power_info->power_setting =
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_setting[0].seq_val = CAM_VAF;
+	power_info->power_setting[0].config_val = 1;
+	power_info->power_setting[0].delay = 2;
+
+	power_info->power_down_setting_size = 1;
+	power_info->power_down_setting =
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	power_info->power_down_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_down_setting[0].seq_val = CAM_VAF;
+	power_info->power_down_setting[0].config_val = 0;
+
+	return rc;
+
+free_power_settings:
+	kfree(power_info->power_setting);
+	power_info->power_setting = NULL;
+	power_info->power_setting_size = 0;
+	return rc;
+}
+
+/**
+ * cam_actuator_power_up()
+ * @a_ctrl: Actuator control structure
+ *
+ * Falls back to the default power settings when none were provided,
+ * fills the vreg params for the power up and power down sequences,
+ * powers up the actuator and initializes the CCI/I2C interface.
+ *
+ * @return: Zero on success, negative error code otherwise.
+ */
+static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info =
+		&a_ctrl->soc_info;
+	struct cam_actuator_soc_private *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	/* Neither power up nor power down settings supplied: use defaults */
+	if ((power_info->power_setting == NULL) &&
+		(power_info->power_down_setting == NULL)) {
+		CAM_INFO(CAM_ACTUATOR,
+			"Using default power settings");
+		rc = cam_actuator_construct_default_power_setting(power_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Construct default actuator power setting failed.");
+			return rc;
+		}
+	}
+
+	/* Parse and fill vreg params for power up settings */
+	rc = msm_camera_fill_vreg_params(
+		&a_ctrl->soc_info,
+		power_info->power_setting,
+		power_info->power_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR,
+			"failed to fill vreg params for power up rc:%d", rc);
+		return rc;
+	}
+
+	/* Parse and fill vreg params for power down settings*/
+	rc = msm_camera_fill_vreg_params(
+		&a_ctrl->soc_info,
+		power_info->power_down_setting,
+		power_info->power_down_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR,
+			"failed to fill vreg params power down rc:%d", rc);
+		return rc;
+	}
+
+	power_info->dev = soc_info->dev;
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR,
+			"failed in actuator power up rc %d", rc);
+		return rc;
+	}
+
+	rc = camera_io_init(&a_ctrl->io_master_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ACTUATOR, "cci init failed: rc: %d", rc);
+
+	return rc;
+}
+
+/**
+ * cam_actuator_power_down()
+ * @a_ctrl: Actuator control structure
+ *
+ * Powers down the actuator and releases the CCI/I2C interface.
+ *
+ * @return: Zero on success, negative error code otherwise.
+ */
+static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int32_t rc = 0;
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_actuator_soc_private *soc_private;
+
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "failed: a_ctrl %pK", a_ctrl);
+		return -EINVAL;
+	}
+
+	/* Dereference a_ctrl only after the NULL check above; the old
+	 * declaration-time initializer evaluated &a_ctrl->soc_info before
+	 * the check, defeating it.
+	 */
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+	soc_info = &a_ctrl->soc_info;
+
+	if (!power_info) {
+		CAM_ERR(CAM_ACTUATOR, "failed: power_info %pK", power_info);
+		return -EINVAL;
+	}
+	rc = cam_sensor_util_power_down(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR, "power down the core is failed:%d", rc);
+		return rc;
+	}
+
+	camera_io_release(&a_ctrl->io_master_info);
+
+	return rc;
+}
+
+/**
+ * cam_actuator_i2c_modes_util()
+ * @io_master_info: I2C/CCI master used for the transaction
+ * @i2c_list:       I2C setting list whose op_code selects the write/poll mode
+ *
+ * Dispatches one I2C setting list to the matching io helper: random write,
+ * sequential/burst continuous write, or per-register poll.
+ *
+ * @return: Zero on success, negative error code otherwise.
+ */
+static int32_t cam_actuator_i2c_modes_util(
+	struct camera_io_master *io_master_info,
+	struct i2c_settings_list *i2c_list)
+{
+	int32_t rc = 0;
+	uint32_t idx, count;
+
+	switch (i2c_list->op_code) {
+	case CAM_SENSOR_I2C_WRITE_RANDOM:
+		rc = camera_io_dev_write(io_master_info,
+			&(i2c_list->i2c_settings));
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to random write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	case CAM_SENSOR_I2C_WRITE_SEQ:
+		rc = camera_io_dev_write_continuous(
+			io_master_info, &(i2c_list->i2c_settings), 0);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to seq write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	case CAM_SENSOR_I2C_WRITE_BURST:
+		rc = camera_io_dev_write_continuous(
+			io_master_info, &(i2c_list->i2c_settings), 1);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to burst write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	case CAM_SENSOR_I2C_POLL:
+		count = i2c_list->i2c_settings.size;
+		for (idx = 0; idx < count; idx++) {
+			rc = camera_io_dev_poll(
+				io_master_info,
+				i2c_list->i2c_settings.reg_setting[idx].reg_addr,
+				i2c_list->i2c_settings.reg_setting[idx].reg_data,
+				i2c_list->i2c_settings.reg_setting[idx].data_mask,
+				i2c_list->i2c_settings.addr_type,
+				i2c_list->i2c_settings.data_type,
+				i2c_list->i2c_settings.reg_setting[idx].delay);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR,
+					"i2c poll apply setting Fail: %d", rc);
+				return rc;
+			}
+		}
+		break;
+	default:
+		/* Unknown op_code: silently succeed, as before */
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_actuator_slaveInfo_pkt_parser()
+ * @a_ctrl:  Actuator control structure
+ * @cmd_buf: Command buffer containing a struct cam_cmd_i2c_info
+ *
+ * Applies the slave address and frequency mode from the I2C info command
+ * buffer to the actuator's io master (CCI or plain I2C).
+ *
+ * @return: Zero on success, -EINVAL on bad args or unknown master type.
+ */
+int32_t cam_actuator_slaveInfo_pkt_parser(struct cam_actuator_ctrl_t *a_ctrl,
+	uint32_t *cmd_buf)
+{
+	int32_t rc = 0;
+	struct cam_cmd_i2c_info *i2c_info;
+
+	if (!a_ctrl || !cmd_buf) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+	if (a_ctrl->io_master_info.master_type == CCI_MASTER) {
+		a_ctrl->io_master_info.cci_client->cci_i2c_master =
+			a_ctrl->cci_i2c_master;
+		a_ctrl->io_master_info.cci_client->i2c_freq_mode =
+			i2c_info->i2c_freq_mode;
+		/* >> 1: presumably converts an 8-bit write address to the
+		 * 7-bit address CCI expects — TODO confirm against the
+		 * cam_cmd_i2c_info contract.
+		 */
+		a_ctrl->io_master_info.cci_client->sid =
+			i2c_info->slave_addr >> 1;
+		CAM_DBG(CAM_ACTUATOR, "Slave addr: 0x%x Freq Mode: %d",
+			i2c_info->slave_addr, i2c_info->i2c_freq_mode);
+	} else if (a_ctrl->io_master_info.master_type == I2C_MASTER) {
+		a_ctrl->io_master_info.client->addr = i2c_info->slave_addr;
+		CAM_DBG(CAM_ACTUATOR, "Slave addr: 0x%x", i2c_info->slave_addr);
+	} else {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Master type: %d",
+			a_ctrl->io_master_info.master_type);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_actuator_apply_settings()
+ * @a_ctrl:  Actuator control structure
+ * @i2c_set: Validated I2C settings array to apply
+ *
+ * Walks the settings list and applies each entry via
+ * cam_actuator_i2c_modes_util(). Note that a failing entry is only
+ * logged; the loop continues with the remaining entries and the rc of
+ * the last entry is returned.
+ *
+ * @return: rc of the last applied entry; -EINVAL on bad args or when
+ *          the settings are not marked valid.
+ */
+int32_t cam_actuator_apply_settings(struct cam_actuator_ctrl_t *a_ctrl,
+	struct i2c_settings_array *i2c_set)
+{
+	struct i2c_settings_list *i2c_list;
+	int32_t rc = 0;
+
+	if (a_ctrl == NULL || i2c_set == NULL) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	if (i2c_set->is_settings_valid != 1) {
+		CAM_ERR(CAM_ACTUATOR, " Invalid settings");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(i2c_list,
+		&(i2c_set->list_head), list) {
+		rc = cam_actuator_i2c_modes_util(
+			&(a_ctrl->io_master_info),
+			i2c_list);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to apply settings: %d",
+				rc);
+		} else {
+			CAM_DBG(CAM_ACTUATOR,
+				"Success:request ID: %d",
+				i2c_set->request_id);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * cam_actuator_apply_request()
+ * @apply: Request manager structure describing the request to apply
+ *
+ * CRM "apply" callback: applies the per-frame I2C settings stored in the
+ * slot apply->request_id % MAX_PER_FRAME_ARRAY (only when that slot's
+ * stored request id matches and is marked valid), then garbage-collects
+ * the slot MAX_SYSTEM_PIPELINE_DELAY entries behind if it belongs to an
+ * older request.
+ *
+ * @return: Zero on success, negative error code otherwise.
+ */
+int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+	int32_t rc = 0, request_id, del_req_id;
+	struct cam_actuator_ctrl_t *a_ctrl = NULL;
+
+	if (!apply) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Input Args");
+		return -EINVAL;
+	}
+
+	a_ctrl = (struct cam_actuator_ctrl_t *)
+		cam_get_device_priv(apply->dev_hdl);
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
+		return -EINVAL;
+	}
+	request_id = apply->request_id % MAX_PER_FRAME_ARRAY;
+
+	trace_cam_apply_req("Actuator", apply->request_id);
+
+	CAM_DBG(CAM_ACTUATOR, "Request Id: %lld", apply->request_id);
+	mutex_lock(&(a_ctrl->actuator_mutex));
+	if ((apply->request_id ==
+		a_ctrl->i2c_data.per_frame[request_id].request_id) &&
+		(a_ctrl->i2c_data.per_frame[request_id].is_settings_valid)
+		== 1) {
+		rc = cam_actuator_apply_settings(a_ctrl,
+			&a_ctrl->i2c_data.per_frame[request_id]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed in applying the request: %lld\n",
+				apply->request_id);
+			goto release_mutex;
+		}
+	}
+	/* Slot of the request that is pipeline-delay frames behind */
+	del_req_id = (request_id +
+		MAX_PER_FRAME_ARRAY - MAX_SYSTEM_PIPELINE_DELAY) %
+		MAX_PER_FRAME_ARRAY;
+
+	if (apply->request_id >
+		a_ctrl->i2c_data.per_frame[del_req_id].request_id) {
+		a_ctrl->i2c_data.per_frame[del_req_id].request_id = 0;
+		rc = delete_request(&a_ctrl->i2c_data.per_frame[del_req_id]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Fail deleting the req: %d err: %d\n",
+				del_req_id, rc);
+			goto release_mutex;
+		}
+	} else {
+		CAM_DBG(CAM_ACTUATOR, "No Valid Req to clean Up");
+	}
+
+release_mutex:
+	mutex_unlock(&(a_ctrl->actuator_mutex));
+	return rc;
+}
+
+/**
+ * cam_actuator_establish_link()
+ * @link: Link setup info from the request manager
+ *
+ * CRM link callback: records the link handle and CRM callbacks on link
+ * enable, and resets them (-1 / NULL) on link disable.
+ *
+ * @return: Zero on success, -EINVAL on bad args or missing device data.
+ */
+int32_t cam_actuator_establish_link(
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	struct cam_actuator_ctrl_t *a_ctrl;
+
+	if (!link) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	a_ctrl = (struct cam_actuator_ctrl_t *)
+		cam_get_device_priv(link->dev_hdl);
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(a_ctrl->actuator_mutex));
+	a_ctrl->bridge_intf.link_hdl =
+		link->link_enable ? link->link_hdl : -1;
+	a_ctrl->bridge_intf.crm_cb =
+		link->link_enable ? link->crm_cb : NULL;
+	mutex_unlock(&(a_ctrl->actuator_mutex));
+
+	return 0;
+}
+
+/**
+ * cam_actuator_update_req_mgr()
+ * @a_ctrl:     Actuator control structure
+ * @csl_packet: Packet whose request id is added to the CRM
+ *
+ * Registers the packet's request id with the request manager via the
+ * bridge add_req callback; logs an error if no callback is linked.
+ */
+static void cam_actuator_update_req_mgr(
+	struct cam_actuator_ctrl_t *a_ctrl,
+	struct cam_packet *csl_packet)
+{
+	struct cam_req_mgr_add_request add_req;
+
+	add_req.link_hdl = a_ctrl->bridge_intf.link_hdl;
+	add_req.req_id = csl_packet->header.request_id;
+	add_req.dev_hdl = a_ctrl->bridge_intf.device_hdl;
+	add_req.skip_before_applying = 0;
+
+	if (a_ctrl->bridge_intf.crm_cb &&
+		a_ctrl->bridge_intf.crm_cb->add_req) {
+		a_ctrl->bridge_intf.crm_cb->add_req(&add_req);
+		CAM_DBG(CAM_ACTUATOR, "Request Id: %lld added to CRM",
+			add_req.req_id);
+	} else {
+		CAM_ERR(CAM_ACTUATOR, "Can't add Request ID: %lld to CRM",
+			csl_packet->header.request_id);
+	}
+}
+
+/**
+ * cam_actuator_publish_dev_info()
+ * @info: Device info structure filled in for the request manager
+ *
+ * CRM device-info callback: reports the actuator's identity, a pipeline
+ * delay of 1 frame, and SOF as its trigger point.
+ *
+ * @return: Zero on success, -EINVAL on bad args.
+ */
+int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+	if (!info) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	info->dev_id = CAM_REQ_MGR_DEVICE_ACTUATOR;
+	strlcpy(info->name, CAM_ACTUATOR_NAME, sizeof(info->name));
+	info->p_delay = 1;
+	info->trigger = CAM_TRIGGER_POINT_SOF;
+
+	return 0;
+}
+
+/**
+ * cam_actuator_i2c_pkt_parse()
+ * @a_ctrl: Actuator control structure
+ * @arg:    Userspace struct cam_control (CAM_CONFIG_DEV payload)
+ *
+ * Copies the config command from userspace, maps the CSL packet, rejects
+ * stale requests (id <= last_flush_req, except INIT packets), then
+ * dispatches on the packet opcode:
+ *  - INIT: parses slave info / power settings / init register settings
+ *    from each command buffer, powers the actuator up if still in
+ *    ACQUIRE state, applies and deletes the init settings;
+ *  - AUTO_MOVE_LENS: parses settings into init_settings for immediate
+ *    apply (ACT_APPLY_SETTINGS_NOW) and registers the request with CRM;
+ *  - MANUAL_MOVE_LENS: parses settings into the per-frame slot for
+ *    deferred apply and registers the request with CRM;
+ *  - NOP: only registers the request with CRM.
+ *
+ * NOTE(review): buffers obtained via cam_mem_get_cpu_buf() are not
+ * explicitly released here on any path — presumably the mem manager
+ * handles this by refcount/unmap elsewhere; TODO confirm.
+ *
+ * @return: Zero on success, negative error code otherwise.
+ */
+int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
+	void *arg)
+{
+	int32_t rc = 0;
+	int32_t i = 0;
+	uint32_t total_cmd_buf_in_bytes = 0;
+	size_t len_of_buff = 0;
+	uint32_t *offset = NULL;
+	uint32_t *cmd_buf = NULL;
+	uintptr_t generic_ptr;
+	struct common_header *cmm_hdr = NULL;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_config_dev_cmd config;
+	struct i2c_data_settings *i2c_data = NULL;
+	struct i2c_settings_array *i2c_reg_settings = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct cam_actuator_soc_private *soc_private = NULL;
+	struct cam_sensor_power_ctrl_t *power_info = NULL;
+
+	if (!a_ctrl || !arg) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+
+	power_info = &soc_private->power_info;
+
+	ioctl_ctrl = (struct cam_control *)arg;
+	if (copy_from_user(&config,
+		u64_to_user_ptr(ioctl_ctrl->handle),
+		sizeof(config)))
+		return -EFAULT;
+	rc = cam_mem_get_cpu_buf(config.packet_handle,
+		&generic_ptr, &len_of_buff);
+	if (rc < 0) {
+		CAM_ERR(CAM_ACTUATOR, "Error in converting command Handle %d",
+			rc);
+		return rc;
+	}
+
+	if (config.offset > len_of_buff) {
+		CAM_ERR(CAM_ACTUATOR,
+			"offset is out of bounds: offset: %lld len: %zu",
+			config.offset, len_of_buff);
+		return -EINVAL;
+	}
+
+	csl_packet =
+		(struct cam_packet *)(generic_ptr + (uint32_t)config.offset);
+	CAM_DBG(CAM_ACTUATOR, "Pkt opcode: %d", csl_packet->header.op_code);
+
+	/* Reject requests already covered by a flush (INIT is exempt) */
+	if ((csl_packet->header.op_code & 0xFFFFFF) !=
+		CAM_ACTUATOR_PACKET_OPCODE_INIT &&
+		csl_packet->header.request_id <= a_ctrl->last_flush_req
+		&& a_ctrl->last_flush_req != 0) {
+		CAM_DBG(CAM_ACTUATOR,
+			"reject request %lld, last request to flush %lld",
+			csl_packet->header.request_id, a_ctrl->last_flush_req);
+		return -EINVAL;
+	}
+
+	if (csl_packet->header.request_id > a_ctrl->last_flush_req)
+		a_ctrl->last_flush_req = 0;
+
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_ACTUATOR_PACKET_OPCODE_INIT:
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+		/* Loop through multiple command buffers */
+		for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+			total_cmd_buf_in_bytes = cmd_desc[i].length;
+			if (!total_cmd_buf_in_bytes)
+				continue;
+			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+				&generic_ptr, &len_of_buff);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR, "Failed to get cpu buf");
+				return rc;
+			}
+			cmd_buf = (uint32_t *)generic_ptr;
+			if (!cmd_buf) {
+				CAM_ERR(CAM_ACTUATOR, "invalid cmd buf");
+				return -EINVAL;
+			}
+			cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+			cmm_hdr = (struct common_header *)cmd_buf;
+
+			/* Dispatch on the command buffer's type header */
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				CAM_DBG(CAM_ACTUATOR,
+					"Received slave info buffer");
+				rc = cam_actuator_slaveInfo_pkt_parser(
+					a_ctrl, cmd_buf);
+				if (rc < 0) {
+					CAM_ERR(CAM_ACTUATOR,
+					"Failed to parse slave info: %d", rc);
+					return rc;
+				}
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+				CAM_DBG(CAM_ACTUATOR,
+					"Received power settings buffer");
+				rc = cam_sensor_update_power_settings(
+					cmd_buf,
+					total_cmd_buf_in_bytes,
+					power_info);
+				if (rc) {
+					CAM_ERR(CAM_ACTUATOR,
+					"Failed:parse power settings: %d",
+					rc);
+					return rc;
+				}
+				break;
+			default:
+				/* Anything else is treated as init settings */
+				CAM_DBG(CAM_ACTUATOR,
+					"Received initSettings buffer");
+				i2c_data = &(a_ctrl->i2c_data);
+				i2c_reg_settings =
+					&i2c_data->init_settings;
+
+				i2c_reg_settings->request_id = 0;
+				i2c_reg_settings->is_settings_valid = 1;
+				rc = cam_sensor_i2c_command_parser(
+					&a_ctrl->io_master_info,
+					i2c_reg_settings,
+					&cmd_desc[i], 1);
+				if (rc < 0) {
+					CAM_ERR(CAM_ACTUATOR,
+					"Failed:parse init settings: %d",
+					rc);
+					return rc;
+				}
+				break;
+			}
+		}
+
+		if (a_ctrl->cam_act_state == CAM_ACTUATOR_ACQUIRE) {
+			rc = cam_actuator_power_up(a_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR,
+					" Actuator Power up failed");
+				return rc;
+			}
+			a_ctrl->cam_act_state = CAM_ACTUATOR_CONFIG;
+		}
+
+		rc = cam_actuator_apply_settings(a_ctrl,
+			&a_ctrl->i2c_data.init_settings);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings");
+			return rc;
+		}
+
+		/* Delete the request even if the apply is failed */
+		rc = delete_request(&a_ctrl->i2c_data.init_settings);
+		if (rc < 0) {
+			CAM_WARN(CAM_ACTUATOR,
+				"Fail in deleting the Init settings");
+			rc = 0;
+		}
+		break;
+	case CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS:
+		if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+				"Not in right state to move lens: %d",
+				a_ctrl->cam_act_state);
+			return rc;
+		}
+		a_ctrl->setting_apply_state = ACT_APPLY_SETTINGS_NOW;
+
+		i2c_data = &(a_ctrl->i2c_data);
+		i2c_reg_settings = &i2c_data->init_settings;
+
+		i2c_data->init_settings.request_id =
+			csl_packet->header.request_id;
+		i2c_reg_settings->is_settings_valid = 1;
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += csl_packet->cmd_buf_offset / sizeof(uint32_t);
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_sensor_i2c_command_parser(
+			&a_ctrl->io_master_info,
+			i2c_reg_settings,
+			cmd_desc, 1);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Auto move lens parsing failed: %d", rc);
+			return rc;
+		}
+		cam_actuator_update_req_mgr(a_ctrl, csl_packet);
+		break;
+	case CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS:
+		if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+				"Not in right state to move lens: %d",
+				a_ctrl->cam_act_state);
+			return rc;
+		}
+
+		a_ctrl->setting_apply_state = ACT_APPLY_SETTINGS_LATER;
+		i2c_data = &(a_ctrl->i2c_data);
+		i2c_reg_settings = &i2c_data->per_frame[
+			csl_packet->header.request_id % MAX_PER_FRAME_ARRAY];
+
+		i2c_reg_settings->request_id =
+			csl_packet->header.request_id;
+		i2c_reg_settings->is_settings_valid = 1;
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += csl_packet->cmd_buf_offset / sizeof(uint32_t);
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_sensor_i2c_command_parser(
+			&a_ctrl->io_master_info,
+			i2c_reg_settings,
+			cmd_desc, 1);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Manual move lens parsing failed: %d", rc);
+			return rc;
+		}
+
+		cam_actuator_update_req_mgr(a_ctrl, csl_packet);
+		break;
+	case CAM_PKT_NOP_OPCODE:
+		if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) {
+			CAM_WARN(CAM_ACTUATOR,
+				"Received NOP packets in invalid state: %d",
+				a_ctrl->cam_act_state);
+			return -EINVAL;
+		}
+
+		cam_actuator_update_req_mgr(a_ctrl, csl_packet);
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_actuator_shutdown()
+ * @a_ctrl: Actuator control structure
+ *
+ * Forces the actuator back to INIT state: powers it down if it was
+ * configured/started, destroys the bridge device handle if acquired,
+ * and frees the power up/down setting arrays.
+ */
+void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int rc = 0;
+	struct cam_actuator_soc_private *soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&soc_private->power_info;
+
+	/* Nothing to do if never acquired */
+	if (a_ctrl->cam_act_state == CAM_ACTUATOR_INIT)
+		return;
+
+	if (a_ctrl->cam_act_state >= CAM_ACTUATOR_CONFIG) {
+		rc = cam_actuator_power_down(a_ctrl);
+		if (rc < 0)
+			CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
+		a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
+	}
+
+	if (a_ctrl->cam_act_state >= CAM_ACTUATOR_ACQUIRE) {
+		rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_ACTUATOR, "destroying dhdl failed");
+		a_ctrl->bridge_intf.device_hdl = -1;
+		a_ctrl->bridge_intf.link_hdl = -1;
+		a_ctrl->bridge_intf.session_hdl = -1;
+	}
+
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	power_info->power_setting_size = 0;
+	power_info->power_down_setting_size = 0;
+
+	a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
+}
+
+/**
+ * cam_actuator_driver_cmd()
+ * @a_ctrl: Actuator control structure
+ * @arg:    Userspace struct cam_control
+ *
+ * Ioctl dispatcher for the actuator: handles ACQUIRE/RELEASE of the
+ * bridge device handle, QUERY_CAP, START/STOP state transitions and
+ * CONFIG packet parsing, all under the actuator mutex.
+ *
+ * Fix: copy_from_user() returns the number of bytes NOT copied (an
+ * unsigned value, never negative), so the previous `if (rc < 0)` check
+ * could never detect a failed copy; failure is now any non-zero return
+ * and is reported as -EFAULT.
+ *
+ * @return: Zero on success, negative error code otherwise.
+ */
+int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
+	void *arg)
+{
+	int rc = 0;
+	struct cam_control *cmd = (struct cam_control *)arg;
+	struct cam_actuator_soc_private *soc_private = NULL;
+	struct cam_sensor_power_ctrl_t *power_info = NULL;
+
+	if (!a_ctrl || !cmd) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+
+	power_info = &soc_private->power_info;
+
+	if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid handle type: %d",
+			cmd->handle_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ACTUATOR, "Opcode to Actuator: %d", cmd->op_code);
+
+	mutex_lock(&(a_ctrl->actuator_mutex));
+	switch (cmd->op_code) {
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev actuator_acq_dev;
+		struct cam_create_dev_hdl bridge_params;
+
+		if (a_ctrl->bridge_intf.device_hdl != -1) {
+			CAM_ERR(CAM_ACTUATOR, "Device is already acquired");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = copy_from_user(&actuator_acq_dev,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(actuator_acq_dev));
+		if (rc) {
+			/* copy_from_user returns bytes not copied; any
+			 * non-zero value means the copy failed.
+			 */
+			CAM_ERR(CAM_ACTUATOR, "Failed Copying from user\n");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		bridge_params.session_hdl = actuator_acq_dev.session_handle;
+		bridge_params.ops = &a_ctrl->bridge_intf.ops;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = a_ctrl;
+
+		actuator_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		a_ctrl->bridge_intf.device_hdl = actuator_acq_dev.device_handle;
+		a_ctrl->bridge_intf.session_hdl =
+			actuator_acq_dev.session_handle;
+
+		CAM_DBG(CAM_ACTUATOR, "Device Handle: %d",
+			actuator_acq_dev.device_handle);
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&actuator_acq_dev,
+			sizeof(struct cam_sensor_acquire_dev))) {
+			CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
+	}
+	break;
+	case CAM_RELEASE_DEV: {
+		if (a_ctrl->cam_act_state == CAM_ACTUATOR_START) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+				"Cant release actuator: in start state");
+			goto release_mutex;
+		}
+
+		if (a_ctrl->cam_act_state == CAM_ACTUATOR_CONFIG) {
+			rc = cam_actuator_power_down(a_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR,
+					"Actuator Power down failed");
+				goto release_mutex;
+			}
+		}
+
+		if (a_ctrl->bridge_intf.device_hdl == -1) {
+			CAM_ERR(CAM_ACTUATOR, "link hdl: %d device hdl: %d",
+				a_ctrl->bridge_intf.device_hdl,
+				a_ctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if (a_ctrl->bridge_intf.link_hdl != -1) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Device [%d] still active on link 0x%x",
+				a_ctrl->cam_act_state,
+				a_ctrl->bridge_intf.link_hdl);
+			rc = -EAGAIN;
+			goto release_mutex;
+		}
+
+		rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_ACTUATOR, "destroying the device hdl");
+		a_ctrl->bridge_intf.device_hdl = -1;
+		a_ctrl->bridge_intf.link_hdl = -1;
+		a_ctrl->bridge_intf.session_hdl = -1;
+		a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
+		a_ctrl->last_flush_req = 0;
+		kfree(power_info->power_setting);
+		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
+		power_info->power_down_setting_size = 0;
+		power_info->power_setting_size = 0;
+	}
+	break;
+	case CAM_QUERY_CAP: {
+		struct cam_actuator_query_cap actuator_cap = {0};
+
+		actuator_cap.slot_info = a_ctrl->soc_info.index;
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&actuator_cap,
+			sizeof(struct cam_actuator_query_cap))) {
+			CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+	}
+	break;
+	case CAM_START_DEV: {
+		if (a_ctrl->cam_act_state != CAM_ACTUATOR_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+				"Not in right state to start : %d",
+				a_ctrl->cam_act_state);
+			goto release_mutex;
+		}
+		a_ctrl->cam_act_state = CAM_ACTUATOR_START;
+		a_ctrl->last_flush_req = 0;
+	}
+	break;
+	case CAM_STOP_DEV: {
+		struct i2c_settings_array *i2c_set = NULL;
+		int i;
+
+		if (a_ctrl->cam_act_state != CAM_ACTUATOR_START) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+				"Not in right state to stop : %d",
+				a_ctrl->cam_act_state);
+			goto release_mutex;
+		}
+
+		/* Drop all still-pending per-frame settings */
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			i2c_set = &(a_ctrl->i2c_data.per_frame[i]);
+
+			if (i2c_set->is_settings_valid == 1) {
+				rc = delete_request(i2c_set);
+				if (rc < 0)
+					CAM_ERR(CAM_SENSOR,
+						"delete request: %lld rc: %d",
+						i2c_set->request_id, rc);
+			}
+		}
+		a_ctrl->last_flush_req = 0;
+		a_ctrl->cam_act_state = CAM_ACTUATOR_CONFIG;
+	}
+	break;
+	case CAM_CONFIG_DEV: {
+		a_ctrl->setting_apply_state =
+			ACT_APPLY_SETTINGS_LATER;
+		rc = cam_actuator_i2c_pkt_parse(a_ctrl, arg);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR, "Failed in actuator Parsing");
+			goto release_mutex;
+		}
+
+		if (a_ctrl->setting_apply_state ==
+			ACT_APPLY_SETTINGS_NOW) {
+			rc = cam_actuator_apply_settings(a_ctrl,
+				&a_ctrl->i2c_data.init_settings);
+			if (rc < 0)
+				CAM_ERR(CAM_ACTUATOR,
+					"Cannot apply Update settings");
+
+			/* Delete the request even if the apply is failed */
+			rc = delete_request(&a_ctrl->i2c_data.init_settings);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR,
+					"Failed in Deleting the Init Pkt: %d",
+					rc);
+				goto release_mutex;
+			}
+		}
+	}
+	break;
+	default:
+		CAM_ERR(CAM_ACTUATOR, "Invalid Opcode %d", cmd->op_code);
+	}
+
+release_mutex:
+	mutex_unlock(&(a_ctrl->actuator_mutex));
+
+	return rc;
+}
+
+/**
+ * cam_actuator_flush_request()
+ * @flush_req: Request manager flush descriptor
+ *
+ * CRM flush callback. For FLUSH_TYPE_ALL, records the flush boundary in
+ * last_flush_req and deletes every valid per-frame setting; for
+ * FLUSH_TYPE_CANCEL_REQ, deletes only the slot whose request id matches
+ * and stops at the first match.
+ *
+ * @return: Zero on success, negative error code otherwise.
+ */
+int32_t cam_actuator_flush_request(struct cam_req_mgr_flush_request *flush_req)
+{
+	int32_t rc = 0, i;
+	uint32_t cancel_req_id_found = 0;
+	struct cam_actuator_ctrl_t *a_ctrl = NULL;
+	struct i2c_settings_array *i2c_set = NULL;
+
+	if (!flush_req)
+		return -EINVAL;
+
+	a_ctrl = (struct cam_actuator_ctrl_t *)
+		cam_get_device_priv(flush_req->dev_hdl);
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (a_ctrl->i2c_data.per_frame == NULL) {
+		CAM_ERR(CAM_ACTUATOR, "i2c frame data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(a_ctrl->actuator_mutex));
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		a_ctrl->last_flush_req = flush_req->req_id;
+		CAM_DBG(CAM_ACTUATOR, "last reqest to flush is %lld",
+			flush_req->req_id);
+	}
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+		i2c_set = &(a_ctrl->i2c_data.per_frame[i]);
+
+		if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
+				&& (i2c_set->request_id != flush_req->req_id))
+			continue;
+
+		if (i2c_set->is_settings_valid == 1) {
+			rc = delete_request(i2c_set);
+			if (rc < 0)
+				CAM_ERR(CAM_ACTUATOR,
+					"delete request: %lld rc: %d",
+					i2c_set->request_id, rc);
+
+			if (flush_req->type ==
+				CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+				cancel_req_id_found = 1;
+				break;
+			}
+		}
+	}
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+		!cancel_req_id_found)
+		CAM_DBG(CAM_ACTUATOR,
+			"Flush request id:%lld not found in the pending list",
+			flush_req->req_id);
+	mutex_unlock(&(a_ctrl->actuator_mutex));
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_core.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_core.h
new file mode 100644
index 000000000000..f17a0ec0aa76
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_core.h
@@ -0,0 +1,73 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ACTUATOR_CORE_H_
+#define _CAM_ACTUATOR_CORE_H_
+
+#include "cam_actuator_dev.h"
+
+/**
+ * @power_info: power setting info to control the power
+ *
+ * This API construct the default actuator power setting.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int32_t cam_actuator_construct_default_power_setting(
+ struct cam_sensor_power_ctrl_t *power_info);
+
+/**
+ * @apply: Req mgr structure for applying request
+ *
+ * This API applies the request that is mentioned
+ */
+int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply);
+
+/**
+ * @info: Sub device info to req mgr
+ *
+ * This API publish the subdevice info to req mgr
+ */
+int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info);
+
+/**
+ * @flush: Req mgr structure for flushing request
+ *
+ * This API flushes the request that is mentioned
+ */
+int cam_actuator_flush_request(struct cam_req_mgr_flush_request *flush);
+
+
+/**
+ * @link: Link setup info
+ *
+ * This API establishes link actuator subdevice with req mgr
+ */
+int32_t cam_actuator_establish_link(
+ struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * @a_ctrl: Actuator ctrl structure
+ * @arg: Camera control command argument
+ *
+ * This API handles the camera control argument reached to actuator
+ */
+int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl, void *arg);
+
+/**
+ * @a_ctrl: Actuator ctrl structure
+ *
+ * This API handles the shutdown ioctl/close
+ */
+void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl);
+
+#endif /* _CAM_ACTUATOR_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_dev.c
new file mode 100644
index 000000000000..26d73a446a5b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -0,0 +1,448 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_actuator_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_actuator_soc.h"
+#include "cam_actuator_core.h"
+#include "cam_trace.h"
+
+static long cam_actuator_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = 0;
+ struct cam_actuator_ctrl_t *a_ctrl =
+ v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_CAM_CONTROL:
+ rc = cam_actuator_driver_cmd(a_ctrl, arg);
+ break;
+ default:
+ CAM_ERR(CAM_ACTUATOR, "Invalid ioctl cmd");
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_actuator_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cam_control cmd_data;
+ int32_t rc = 0;
+
+ if (copy_from_user(&cmd_data, (void __user *)arg,
+ sizeof(cmd_data))) {
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed to copy from user_ptr=%pK size=%zu",
+ (void __user *)arg, sizeof(cmd_data));
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case VIDIOC_CAM_CONTROL:
+ cmd = VIDIOC_CAM_CONTROL;
+ rc = cam_actuator_subdev_ioctl(sd, cmd, &cmd_data);
+ if (rc) {
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed in actuator subdev handling rc: %d",
+ rc);
+ return rc;
+ }
+ break;
+ default:
+ CAM_ERR(CAM_ACTUATOR, "Invalid compat ioctl: %d", cmd);
+ rc = -EINVAL;
+ }
+
+ if (!rc) {
+ if (copy_to_user((void __user *)arg, &cmd_data,
+ sizeof(cmd_data))) {
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed to copy to user_ptr=%pK size=%zu",
+ (void __user *)arg, sizeof(cmd_data));
+ rc = -EFAULT;
+ }
+ }
+ return rc;
+}
+#endif
+
+static int cam_actuator_subdev_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct cam_actuator_ctrl_t *a_ctrl =
+ v4l2_get_subdevdata(sd);
+
+ if (!a_ctrl) {
+ CAM_ERR(CAM_ACTUATOR, "a_ctrl ptr is NULL");
+ return -EINVAL;
+ }
+
+ mutex_lock(&(a_ctrl->actuator_mutex));
+ cam_actuator_shutdown(a_ctrl);
+ mutex_unlock(&(a_ctrl->actuator_mutex));
+
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops cam_actuator_subdev_core_ops = {
+ .ioctl = cam_actuator_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = cam_actuator_init_subdev_do_ioctl,
+#endif
+};
+
+static struct v4l2_subdev_ops cam_actuator_subdev_ops = {
+ .core = &cam_actuator_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cam_actuator_internal_ops = {
+ .close = cam_actuator_subdev_close,
+};
+
+static int cam_actuator_init_subdev(struct cam_actuator_ctrl_t *a_ctrl)
+{
+ int rc = 0;
+
+ a_ctrl->v4l2_dev_str.internal_ops =
+ &cam_actuator_internal_ops;
+ a_ctrl->v4l2_dev_str.ops =
+ &cam_actuator_subdev_ops;
+ strlcpy(a_ctrl->device_name, CAMX_ACTUATOR_DEV_NAME,
+ sizeof(a_ctrl->device_name));
+ a_ctrl->v4l2_dev_str.name =
+ a_ctrl->device_name;
+ a_ctrl->v4l2_dev_str.sd_flags =
+ (V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+ a_ctrl->v4l2_dev_str.ent_function =
+ CAM_ACTUATOR_DEVICE_TYPE;
+ a_ctrl->v4l2_dev_str.token = a_ctrl;
+
+ rc = cam_register_subdev(&(a_ctrl->v4l2_dev_str));
+ if (rc)
+ CAM_ERR(CAM_SENSOR, "Fail with cam_register_subdev rc: %d", rc);
+
+ return rc;
+}
+
+static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int32_t rc = 0;
+ int32_t i = 0;
+ struct cam_actuator_ctrl_t *a_ctrl;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_actuator_soc_private *soc_private = NULL;
+
+ if (client == NULL || id == NULL) {
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args client: %pK id: %pK",
+ client, id);
+ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ CAM_ERR(CAM_ACTUATOR, "%s :: i2c_check_functionality failed",
+ client->name);
+ rc = -EFAULT;
+ return rc;
+ }
+
+ /* Create sensor control structure */
+ a_ctrl = kzalloc(sizeof(*a_ctrl), GFP_KERNEL);
+ if (!a_ctrl)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, a_ctrl);
+
+ soc_private = kzalloc(sizeof(struct cam_actuator_soc_private),
+ GFP_KERNEL);
+ if (!soc_private) {
+ rc = -ENOMEM;
+ goto free_ctrl;
+ }
+ a_ctrl->soc_info.soc_private = soc_private;
+
+ a_ctrl->io_master_info.client = client;
+ soc_info = &a_ctrl->soc_info;
+ soc_info->dev = &client->dev;
+ soc_info->dev_name = client->name;
+ a_ctrl->io_master_info.master_type = I2C_MASTER;
+
+ rc = cam_actuator_parse_dt(a_ctrl, &client->dev);
+ if (rc < 0) {
+ CAM_ERR(CAM_ACTUATOR, "failed: cam_sensor_parse_dt rc %d", rc);
+ goto free_soc;
+ }
+
+ rc = cam_actuator_init_subdev(a_ctrl);
+ if (rc)
+ goto free_soc;
+
+ if (soc_private->i2c_info.slave_addr != 0)
+ a_ctrl->io_master_info.client->addr =
+ soc_private->i2c_info.slave_addr;
+
+ a_ctrl->i2c_data.per_frame =
+ (struct i2c_settings_array *)
+ kzalloc(sizeof(struct i2c_settings_array) *
+ MAX_PER_FRAME_ARRAY, GFP_KERNEL);
+ if (a_ctrl->i2c_data.per_frame == NULL) {
+ rc = -ENOMEM;
+ goto unreg_subdev;
+ }
+
+ INIT_LIST_HEAD(&(a_ctrl->i2c_data.init_settings.list_head));
+
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+ INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
+
+ a_ctrl->bridge_intf.device_hdl = -1;
+ a_ctrl->bridge_intf.link_hdl = -1;
+ a_ctrl->bridge_intf.ops.get_dev_info =
+ cam_actuator_publish_dev_info;
+ a_ctrl->bridge_intf.ops.link_setup =
+ cam_actuator_establish_link;
+ a_ctrl->bridge_intf.ops.apply_req =
+ cam_actuator_apply_request;
+
+ v4l2_set_subdevdata(&(a_ctrl->v4l2_dev_str.sd), a_ctrl);
+
+ a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
+
+ return rc;
+
+unreg_subdev:
+ cam_unregister_subdev(&(a_ctrl->v4l2_dev_str));
+free_soc:
+ kfree(soc_private);
+free_ctrl:
+ kfree(a_ctrl);
+ return rc;
+}
+
+static int32_t cam_actuator_platform_remove(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct cam_actuator_ctrl_t *a_ctrl;
+ struct cam_actuator_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
+
+ a_ctrl = platform_get_drvdata(pdev);
+ if (!a_ctrl) {
+ CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
+ return 0;
+ }
+
+ soc_private =
+ (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ kfree(a_ctrl->io_master_info.cci_client);
+ a_ctrl->io_master_info.cci_client = NULL;
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
+ kfree(a_ctrl->soc_info.soc_private);
+ kfree(a_ctrl->i2c_data.per_frame);
+ a_ctrl->i2c_data.per_frame = NULL;
+ devm_kfree(&pdev->dev, a_ctrl);
+
+ return rc;
+}
+
+static int32_t cam_actuator_driver_i2c_remove(struct i2c_client *client)
+{
+ int32_t rc = 0;
+ struct cam_actuator_ctrl_t *a_ctrl =
+ i2c_get_clientdata(client);
+ struct cam_actuator_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
+
+ /* Handle I2C Devices */
+ if (!a_ctrl) {
+ CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
+ return -EINVAL;
+ }
+
+ soc_private =
+ (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ /*Free Allocated Mem */
+ kfree(a_ctrl->i2c_data.per_frame);
+ a_ctrl->i2c_data.per_frame = NULL;
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ kfree(a_ctrl->soc_info.soc_private);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
+ a_ctrl->soc_info.soc_private = NULL;
+ kfree(a_ctrl);
+ return rc;
+}
+
+static const struct of_device_id cam_actuator_driver_dt_match[] = {
+ {.compatible = "qcom,actuator"},
+ {}
+};
+
+static int32_t cam_actuator_driver_platform_probe(
+ struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ int32_t i = 0;
+ struct cam_actuator_ctrl_t *a_ctrl = NULL;
+ struct cam_actuator_soc_private *soc_private = NULL;
+
+ /* Create actuator control structure */
+ a_ctrl = devm_kzalloc(&pdev->dev,
+ sizeof(struct cam_actuator_ctrl_t), GFP_KERNEL);
+ if (!a_ctrl)
+ return -ENOMEM;
+
+ /*fill in platform device*/
+ a_ctrl->v4l2_dev_str.pdev = pdev;
+ a_ctrl->soc_info.pdev = pdev;
+ a_ctrl->soc_info.dev = &pdev->dev;
+ a_ctrl->soc_info.dev_name = pdev->name;
+ a_ctrl->io_master_info.master_type = CCI_MASTER;
+
+ a_ctrl->io_master_info.cci_client = kzalloc(sizeof(
+ struct cam_sensor_cci_client), GFP_KERNEL);
+ if (!(a_ctrl->io_master_info.cci_client)) {
+ rc = -ENOMEM;
+ goto free_ctrl;
+ }
+
+ soc_private = kzalloc(sizeof(struct cam_actuator_soc_private),
+ GFP_KERNEL);
+ if (!soc_private) {
+ rc = -ENOMEM;
+ goto free_cci_client;
+ }
+ a_ctrl->soc_info.soc_private = soc_private;
+ soc_private->power_info.dev = &pdev->dev;
+
+ a_ctrl->i2c_data.per_frame =
+ (struct i2c_settings_array *)
+ kzalloc(sizeof(struct i2c_settings_array) *
+ MAX_PER_FRAME_ARRAY, GFP_KERNEL);
+ if (a_ctrl->i2c_data.per_frame == NULL) {
+ rc = -ENOMEM;
+ goto free_soc;
+ }
+
+ INIT_LIST_HEAD(&(a_ctrl->i2c_data.init_settings.list_head));
+
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+ INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
+
+ rc = cam_actuator_parse_dt(a_ctrl, &(pdev->dev));
+ if (rc < 0) {
+ CAM_ERR(CAM_ACTUATOR, "Paring actuator dt failed rc %d", rc);
+ goto free_mem;
+ }
+
+ /* Fill platform device id*/
+ pdev->id = a_ctrl->soc_info.index;
+
+ rc = cam_actuator_init_subdev(a_ctrl);
+ if (rc)
+ goto free_mem;
+
+ a_ctrl->bridge_intf.device_hdl = -1;
+ a_ctrl->bridge_intf.link_hdl = -1;
+ a_ctrl->bridge_intf.ops.get_dev_info =
+ cam_actuator_publish_dev_info;
+ a_ctrl->bridge_intf.ops.link_setup =
+ cam_actuator_establish_link;
+ a_ctrl->bridge_intf.ops.apply_req =
+ cam_actuator_apply_request;
+ a_ctrl->bridge_intf.ops.flush_req =
+ cam_actuator_flush_request;
+ a_ctrl->last_flush_req = 0;
+
+ platform_set_drvdata(pdev, a_ctrl);
+ v4l2_set_subdevdata(&a_ctrl->v4l2_dev_str.sd, a_ctrl);
+ a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
+
+ return rc;
+
+free_mem:
+ kfree(a_ctrl->i2c_data.per_frame);
+free_soc:
+ kfree(soc_private);
+free_cci_client:
+ kfree(a_ctrl->io_master_info.cci_client);
+free_ctrl:
+ devm_kfree(&pdev->dev, a_ctrl);
+ return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_actuator_driver_dt_match);
+
+static struct platform_driver cam_actuator_platform_driver = {
+ .probe = cam_actuator_driver_platform_probe,
+ .driver = {
+ .name = "qcom,actuator",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_actuator_driver_dt_match,
+ .suppress_bind_attrs = true,
+ },
+ .remove = cam_actuator_platform_remove,
+};
+
+static const struct i2c_device_id i2c_id[] = {
+ {ACTUATOR_DRIVER_I2C, (kernel_ulong_t)NULL},
+ { }
+};
+
+static struct i2c_driver cam_actuator_driver_i2c = {
+ .id_table = i2c_id,
+ .probe = cam_actuator_driver_i2c_probe,
+ .remove = cam_actuator_driver_i2c_remove,
+ .driver = {
+ .name = ACTUATOR_DRIVER_I2C,
+ },
+};
+
+static int __init cam_actuator_driver_init(void)
+{
+ int32_t rc = 0;
+
+ rc = platform_driver_register(&cam_actuator_platform_driver);
+ if (rc < 0) {
+ CAM_ERR(CAM_ACTUATOR,
+ "platform_driver_register failed rc = %d", rc);
+ return rc;
+ }
+ rc = i2c_add_driver(&cam_actuator_driver_i2c);
+ if (rc)
+ CAM_ERR(CAM_ACTUATOR, "i2c_add_driver failed rc = %d", rc);
+
+ return rc;
+}
+
+static void __exit cam_actuator_driver_exit(void)
+{
+ platform_driver_unregister(&cam_actuator_platform_driver);
+ i2c_del_driver(&cam_actuator_driver_i2c);
+}
+
+module_init(cam_actuator_driver_init);
+module_exit(cam_actuator_driver_exit);
+MODULE_DESCRIPTION("cam_actuator_driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_dev.h
new file mode 100644
index 000000000000..36b2994b9d71
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -0,0 +1,132 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ACTUATOR_DEV_H_
+#define _CAM_ACTUATOR_DEV_H_
+
+#include <cam_sensor_io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_cci_dev.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_subdev.h>
+#include "cam_sensor_util.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE 1
+#define FALSE 0
+
+#define ACTUATOR_DRIVER_I2C "i2c_actuator"
+#define CAMX_ACTUATOR_DEV_NAME "cam-actuator-driver"
+
+#define MSM_ACTUATOR_MAX_VREGS (10)
+#define ACTUATOR_MAX_POLL_COUNT 10
+
+
+enum cam_actuator_apply_state_t {
+ ACT_APPLY_SETTINGS_NOW,
+ ACT_APPLY_SETTINGS_LATER,
+};
+
+enum cam_actuator_state {
+ CAM_ACTUATOR_INIT,
+ CAM_ACTUATOR_ACQUIRE,
+ CAM_ACTUATOR_CONFIG,
+ CAM_ACTUATOR_START,
+};
+
+/**
+ * struct cam_actuator_i2c_info_t - I2C info
+ * @slave_addr : slave address
+ * @i2c_freq_mode : i2c frequency mode
+ */
+struct cam_actuator_i2c_info_t {
+ uint16_t slave_addr;
+ uint8_t i2c_freq_mode;
+};
+
+struct cam_actuator_soc_private {
+ struct cam_actuator_i2c_info_t i2c_info;
+ struct cam_sensor_power_ctrl_t power_info;
+};
+
+/**
+ * struct intf_params
+ * @device_hdl: Device Handle
+ * @session_hdl: Session Handle
+ * @ops: KMD operations
+ * @crm_cb: Callback API pointers
+ */
+struct intf_params {
+ int32_t device_hdl;
+ int32_t session_hdl;
+ int32_t link_hdl;
+ struct cam_req_mgr_kmd_ops ops;
+ struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_actuator_ctrl_t
+ * @i2c_driver: I2C device info
+ * @pdev: Platform device
+ * @cci_i2c_master: I2C structure
+ * @io_master_info: Information about the communication master
+ * @actuator_mutex: Actuator mutex
+ * @act_apply_state: Actuator settings aRegulator config
+ * @id: Cell Index
+ * @res_apply_state: Actuator settings apply state
+ * @cam_act_state: Actuator state
+ * @gconf: GPIO config
+ * @pinctrl_info: Pinctrl information
+ * @v4l2_dev_str: V4L2 device structure
+ * @i2c_data: I2C register settings structure
+ * @act_info: Sensor query cap structure
+ * @of_node: Node ptr
+ * @device_name: Device name
+ * @last_flush_req: Last request to flush
+ */
+struct cam_actuator_ctrl_t {
+ struct i2c_driver *i2c_driver;
+ enum cci_i2c_master_t cci_i2c_master;
+ enum cci_device_num cci_num;
+ struct camera_io_master io_master_info;
+ struct cam_hw_soc_info soc_info;
+ struct mutex actuator_mutex;
+ uint32_t id;
+ enum cam_actuator_apply_state_t setting_apply_state;
+ enum cam_actuator_state cam_act_state;
+ uint8_t cam_pinctrl_status;
+ struct cam_subdev v4l2_dev_str;
+ struct i2c_data_settings i2c_data;
+ struct cam_actuator_query_cap act_info;
+ struct intf_params bridge_intf;
+ char device_name[20];
+ uint32_t last_flush_req;
+};
+
+#endif /* _CAM_ACTUATOR_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_soc.c
new file mode 100644
index 000000000000..8421e31e65b3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -0,0 +1,86 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+#include "cam_actuator_soc.h"
+#include "cam_soc_util.h"
+
+int32_t cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
+ struct device *dev)
+{
+ int32_t rc = 0;
+ struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info;
+ struct cam_actuator_soc_private *soc_private =
+ (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+ struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+ struct device_node *of_node = NULL;
+
+ /* Initialize mutex */
+ mutex_init(&(a_ctrl->actuator_mutex));
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_ACTUATOR, "parsing common soc dt(rc %d)", rc);
+ return rc;
+ }
+
+ of_node = soc_info->dev->of_node;
+
+ if (a_ctrl->io_master_info.master_type == CCI_MASTER) {
+ rc = of_property_read_u32(of_node, "cci-master",
+ &(a_ctrl->cci_i2c_master));
+ CAM_DBG(CAM_ACTUATOR, "cci-master %d, rc %d",
+ a_ctrl->cci_i2c_master, rc);
+ if ((rc < 0) || (a_ctrl->cci_i2c_master >= MASTER_MAX)) {
+ CAM_ERR(CAM_ACTUATOR,
+ "Wrong info: rc: %d, dt CCI master:%d",
+ rc, a_ctrl->cci_i2c_master);
+ rc = -EFAULT;
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "cci-device",
+ &a_ctrl->cci_num);
+ CAM_DBG(CAM_ACTUATOR, "cci-device %d, rc %d",
+ a_ctrl->cci_num, rc);
+ if (rc < 0) {
+ /* Set default master 0 */
+ a_ctrl->cci_num = CCI_DEVICE_0;
+ rc = 0;
+ }
+ a_ctrl->io_master_info.cci_client->cci_device = a_ctrl->cci_num;
+ }
+
+ if (!soc_info->gpio_data) {
+ CAM_INFO(CAM_ACTUATOR, "No GPIO found");
+ rc = 0;
+ return rc;
+ }
+
+ if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+ CAM_INFO(CAM_ACTUATOR, "No GPIO found");
+ return -EINVAL;
+ }
+
+ rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+ &power_info->gpio_num_info);
+ if ((rc < 0) || (!power_info->gpio_num_info)) {
+ CAM_ERR(CAM_ACTUATOR, "No/Error Actuator GPIOs");
+ return -EINVAL;
+ }
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_soc.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_soc.h
new file mode 100644
index 000000000000..9c84f43fa7ce
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_actuator/cam_actuator_soc.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ACTUATOR_SOC_H_
+#define _CAM_ACTUATOR_SOC_H_
+
+#include "cam_actuator_dev.h"
+
+/**
+ * @a_ctrl: Actuator ctrl structure
+ *
+ * This API parses actuator device tree
+ */
+int cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
+ struct device *dev);
+
+#endif /* _CAM_ACTUATOR_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/Makefile
new file mode 100644
index 000000000000..3da78c00c5bd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci_dev.o cam_cci_core.o cam_cci_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.c
new file mode 100644
index 000000000000..b0e810711c5d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -0,0 +1,1646 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_cci_core.h"
+#include "cam_cci_dev.h"
+
+static int32_t cam_cci_convert_type_to_num_bytes(
+ enum camera_sensor_i2c_type type)
+{
+ int32_t num_bytes;
+
+ switch (type) {
+ case CAMERA_SENSOR_I2C_TYPE_BYTE:
+ num_bytes = 1;
+ break;
+ case CAMERA_SENSOR_I2C_TYPE_WORD:
+ num_bytes = 2;
+ break;
+ case CAMERA_SENSOR_I2C_TYPE_3B:
+ num_bytes = 3;
+ break;
+ case CAMERA_SENSOR_I2C_TYPE_DWORD:
+ num_bytes = 4;
+ break;
+ default:
+ CAM_ERR(CAM_CCI, "failed: %d", type);
+ num_bytes = 0;
+ break;
+ }
+ return num_bytes;
+}
+
+static void cam_cci_flush_queue(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master)
+{
+ int32_t rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
+
+ cam_io_w_mb(1 << master, base + CCI_HALT_REQ_ADDR);
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
+ if (rc < 0) {
+ CAM_ERR(CAM_CCI, "wait failed");
+ } else if (rc == 0) {
+ CAM_ERR(CAM_CCI, "wait timeout");
+
+ /* Set reset pending flag to TRUE */
+ cci_dev->cci_master_info[master].reset_pending = TRUE;
+
+ /* Set proper mask to RESET CMD address based on MASTER */
+ if (master == MASTER_0)
+ cam_io_w_mb(CCI_M0_RESET_RMSK,
+ base + CCI_RESET_CMD_ADDR);
+ else
+ cam_io_w_mb(CCI_M1_RESET_RMSK,
+ base + CCI_RESET_CMD_ADDR);
+
+ /* wait for reset done irq */
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].reset_complete,
+ CCI_TIMEOUT);
+ if (rc <= 0)
+ CAM_ERR(CAM_CCI, "wait failed %d", rc);
+ }
+}
+
+static int32_t cam_cci_validate_queue(struct cci_device *cci_dev,
+ uint32_t len,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+ uint32_t read_val = 0;
+ uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
+ unsigned long flags;
+
+ read_val = cam_io_r_mb(base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d",
+ read_val, len,
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+ if ((read_val + len + 1) >
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size) {
+ uint32_t reg_val = 0;
+ uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+
+ CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD");
+ cam_io_w_mb(report_val,
+ base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+ read_val++;
+ CAM_DBG(CAM_CCI,
+ "CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d",
+ read_val, queue);
+ cam_io_w_mb(read_val, base +
+ CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+ reg_val = 1 << ((master * 2) + queue);
+ CAM_DBG(CAM_CCI, "CCI_QUEUE_START_ADDR");
+ spin_lock_irqsave(
+ &cci_dev->cci_master_info[master].lock_q[queue], flags);
+ atomic_set(
+ &cci_dev->cci_master_info[master].done_pending[queue],
+ 1);
+ cam_io_w_mb(reg_val, base +
+ CCI_QUEUE_START_ADDR);
+ CAM_DBG(CAM_CCI, "wait_for_completion_timeout");
+ atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+ spin_unlock_irqrestore(
+ &cci_dev->cci_master_info[master].lock_q[queue], flags);
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].report_q[queue],
+ CCI_TIMEOUT);
+ if (rc <= 0) {
+ CAM_ERR(CAM_CCI, "Wait_for_completion_timeout: rc: %d",
+ rc);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ cam_cci_flush_queue(cci_dev, master);
+ return rc;
+ }
+ rc = cci_dev->cci_master_info[master].status;
+ if (rc < 0)
+ CAM_ERR(CAM_CCI, "Failed rc %d", rc);
+ }
+
+ return rc;
+}
+
+static int32_t cam_cci_write_i2c_queue(struct cci_device *cci_dev,
+ uint32_t val,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+ uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
+
+ if (!cci_dev) {
+ CAM_ERR(CAM_CCI, "Failed");
+ return -EINVAL;
+ }
+
+ rc = cam_cci_validate_queue(cci_dev, 1, master, queue);
+ if (rc < 0) {
+ CAM_ERR(CAM_CCI, "Failed %d", rc);
+ return rc;
+ }
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x",
+ CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset, val);
+ cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+ return rc;
+}
+
+static int32_t cam_cci_lock_queue(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue, uint32_t en)
+{
+ uint32_t val;
+
+ if (queue != PRIORITY_QUEUE)
+ return 0;
+
+ val = en ? CCI_I2C_LOCK_CMD : CCI_I2C_UNLOCK_CMD;
+ return cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+}
+
+#ifdef DUMP_CCI_REGISTERS
+static void cam_cci_dump_registers(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master, enum cci_i2c_queue_t queue)
+{
+ uint32_t read_val = 0;
+ uint32_t i = 0;
+ uint32_t reg_offset = 0;
+ void __iomem *base = cci_dev->soc_info.reg_map[0].mem_base;
+
+ /* CCI Top Registers */
+ CAM_INFO(CAM_CCI, "****CCI TOP Registers ****");
+ for (i = 0; i < DEBUG_TOP_REG_COUNT; i++) {
+ reg_offset = DEBUG_TOP_REG_START + i * 4;
+ read_val = cam_io_r_mb(base + reg_offset);
+ CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
+ }
+
+ /* CCI Master registers */
+ CAM_INFO(CAM_CCI, "****CCI MASTER %d Registers ****",
+ master);
+ for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
+ reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
+ read_val = cam_io_r_mb(base + reg_offset);
+ CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
+ }
+
+ /* CCI Master Queue registers */
+ CAM_INFO(CAM_CCI, " **** CCI MASTER%d QUEUE%d Registers ****",
+ master, queue);
+ for (i = 0; i < DEBUG_MASTER_QUEUE_REG_COUNT; i++) {
+ reg_offset = DEBUG_MASTER_QUEUE_REG_START + master*0x200 +
+ queue*0x100 + i * 4;
+ read_val = cam_io_r_mb(base + reg_offset);
+ CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
+ }
+
+ /* CCI Interrupt registers */
+ CAM_INFO(CAM_CCI, " ****CCI Interrupt Registers ****");
+ for (i = 0; i < DEBUG_INTR_REG_COUNT; i++) {
+ reg_offset = DEBUG_INTR_REG_START + i * 4;
+ read_val = cam_io_r_mb(base + reg_offset);
+ CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
+ }
+}
+#endif
+
+static uint32_t cam_cci_wait(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+
+ if (!cci_dev) {
+ CAM_ERR(CAM_CCI, "failed");
+ return -EINVAL;
+ }
+
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].report_q[queue], CCI_TIMEOUT);
+ CAM_DBG(CAM_CCI, "wait DONE_for_completion_timeout");
+
+ if (rc <= 0) {
+#ifdef DUMP_CCI_REGISTERS
+ cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+ CAM_ERR(CAM_CCI, "wait for queue: %d", queue);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ cam_cci_flush_queue(cci_dev, master);
+ return rc;
+ }
+ rc = cci_dev->cci_master_info[master].status;
+ if (rc < 0) {
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * cam_cci_load_report_cmd - append a REPORT command word to @queue.
+ *
+ * Reads the queue's current word count, writes one CCI_I2C_REPORT_CMD
+ * word (with bit 8 set - presumably the "raise IRQ on execution" flag,
+ * TODO confirm against the CCI HW spec) into the queue's LOAD_DATA
+ * register, then bumps EXEC_WORD_CNT by one so the new word is executed.
+ *
+ * Caller context: some callers hold lock_q[queue] while invoking this
+ * (see cam_cci_process_half_q), so this must not sleep.
+ */
+static void cam_cci_load_report_cmd(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+
+	/* Per-master/per-queue register bank stride: 0x200 / 0x100. */
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	uint32_t read_val = cam_io_r_mb(base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+
+	CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD curr_w_cnt: %d", read_val);
+	cam_io_w_mb(report_val,
+		base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+	/* One more word is now queued; advance the exec count to cover it. */
+	read_val++;
+
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d", read_val);
+	cam_io_w_mb(read_val, base +
+		CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+}
+
+/*
+ * cam_cci_wait_report_cmd - load a REPORT command, start the queue and
+ * wait for it to complete.
+ *
+ * Marks the queue as kicked (q_free = 1) and as having a completion
+ * pending (done_pending = 1) under lock_q before writing the per-queue
+ * start bit to CCI_QUEUE_START_ADDR, then blocks in cam_cci_wait().
+ *
+ * Return: 0 on success, negative errno from cam_cci_wait() otherwise.
+ */
+static int32_t cam_cci_wait_report_cmd(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	unsigned long flags;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+
+	/* One start bit per (master, queue) pair. */
+	uint32_t reg_val = 1 << ((master * 2) + queue);
+
+	cam_cci_load_report_cmd(cci_dev, master, queue);
+	spin_lock_irqsave(
+		&cci_dev->cci_master_info[master].lock_q[queue], flags);
+	atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+	atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1);
+	spin_unlock_irqrestore(
+		&cci_dev->cci_master_info[master].lock_q[queue], flags);
+	cam_io_w_mb(reg_val, base +
+		CCI_QUEUE_START_ADDR);
+
+	return cam_cci_wait(cci_dev, master, queue);
+}
+
+/*
+ * cam_cci_transfer_end - finish an I2C transfer on (@master, @queue).
+ *
+ * Two cases, decided under lock_q:
+ *  - q_free == 0: the queue was never kicked.  Unlock the HW queue
+ *    (lock_queue(..., 0)) and run/wait a REPORT command.
+ *  - q_free == 1: a run is already in flight.  Flag done_pending, wait
+ *    for the in-flight run to complete, then unlock the HW queue and
+ *    run/wait a final REPORT command.
+ *
+ * Return: 0 on success, negative errno on the first failing step.
+ */
+static int32_t cam_cci_transfer_end(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(
+		&cci_dev->cci_master_info[master].lock_q[queue], flags);
+	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[master].lock_q[queue], flags);
+		rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc: %d", rc);
+			return rc;
+		}
+		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+			return rc;
+		}
+	} else {
+		/* Queue already running: wait for it before unlocking. */
+		atomic_set(
+			&cci_dev->cci_master_info[master].done_pending[queue],
+			1);
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[master].lock_q[queue], flags);
+		rc = cam_cci_wait(cci_dev, master, queue);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+			return rc;
+		}
+		rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+			return rc;
+		}
+		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "Failed rc %d", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cci_get_queue_free_size - remaining capacity of (@master, @queue).
+ *
+ * Return: max_queue_size minus the queue's current word count, i.e. the
+ * number of command words that can still be loaded.
+ */
+static int32_t cam_cci_get_queue_free_size(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+	uint32_t max_words =
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size;
+	uint32_t used_words;
+
+	/* Words currently occupying the command queue. */
+	used_words = cam_io_r_mb(base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d",
+		used_words, max_words);
+
+	return max_words - used_words;
+}
+
+/*
+ * cam_cci_process_half_q - kick the queue once it is half full.
+ *
+ * If the queue has not been kicked yet (q_free == 0), load a REPORT
+ * command, mark it kicked and start the queue - all under lock_q so the
+ * IRQ path sees a consistent state.  If it was already kicked this is a
+ * no-op.  Does not wait for completion.
+ */
+static void cam_cci_process_half_q(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	unsigned long flags;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+	/* Per-(master, queue) start bit. */
+	uint32_t reg_val = 1 << ((master * 2) + queue);
+
+	spin_lock_irqsave(&cci_dev->cci_master_info[master].lock_q[queue],
+		flags);
+	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
+		cam_cci_load_report_cmd(cci_dev, master, queue);
+		atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+		cam_io_w_mb(reg_val, base +
+			CCI_QUEUE_START_ADDR);
+	}
+	spin_unlock_irqrestore(&cci_dev->cci_master_info[master].lock_q[queue],
+		flags);
+}
+
+/*
+ * cam_cci_process_full_q - drain a full queue before loading more words.
+ *
+ * If the queue was already kicked (q_free == 1, e.g. by
+ * cam_cci_process_half_q), just flag done_pending and wait for the run
+ * to finish.  Otherwise load/run a REPORT command and wait for it.
+ *
+ * Return: 0 on success, negative errno from the wait path.
+ */
+static int32_t cam_cci_process_full_q(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	unsigned long flags;
+
+
+	spin_lock_irqsave(&cci_dev->cci_master_info[master].lock_q[queue],
+		flags);
+	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 1) {
+		atomic_set(
+			&cci_dev->cci_master_info[master].done_pending[queue],
+			1);
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[master].lock_q[queue], flags);
+		rc = cam_cci_wait(cci_dev, master, queue);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+			return rc;
+		}
+	} else {
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[master].lock_q[queue], flags);
+		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cci_calc_cmd_len - compute the length, in 32-bit queue words, of
+ * the next write command to be loaded.
+ *
+ * @cmd_size: number of register/value entries remaining.
+ * @i2c_cmd:  first remaining entry.
+ * @pack:     out - how many *additional* entries with consecutive
+ *            register addresses can be packed into this command
+ *            (0 for SEQ/BURST writes).
+ *
+ * For SEQ/BURST writes the length is simply the remaining payload capped
+ * at payload_size.  Otherwise entries are packed while addresses are
+ * consecutive, there is no per-entry delay, and the payload limit is not
+ * exceeded; the (cmd - i2c_cmd) >= (cmd_size - 1) guard keeps the
+ * (cmd+1)->reg_addr look-ahead inside the array.
+ *
+ * Return: word count (>= 1) on success, -EINVAL on bad args or overflow.
+ * NOTE(review): callers must store the result in a *signed* variable or
+ * the negative error return is invisible (see cam_cci_data_queue).
+ */
+static int32_t cam_cci_calc_cmd_len(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl, uint32_t cmd_size,
+	struct cam_sensor_i2c_reg_array *i2c_cmd, uint32_t *pack)
+{
+	uint8_t i;
+	uint32_t len = 0;
+	uint8_t data_len = 0, addr_len = 0;
+	uint8_t pack_max_len;
+	struct cam_sensor_i2c_reg_setting *msg;
+	struct cam_sensor_i2c_reg_array *cmd = i2c_cmd;
+	uint32_t size = cmd_size;
+
+	if (!cci_dev || !c_ctrl) {
+		CAM_ERR(CAM_CCI, "failed");
+		return -EINVAL;
+	}
+
+	msg = &c_ctrl->cfg.cci_i2c_write_cfg;
+	*pack = 0;
+
+	if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ ||
+		c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST) {
+		addr_len = cam_cci_convert_type_to_num_bytes(msg->addr_type);
+		len = (size + addr_len) <= (cci_dev->payload_size) ?
+			(size + addr_len):cci_dev->payload_size;
+	} else {
+		addr_len = cam_cci_convert_type_to_num_bytes(msg->addr_type);
+		data_len = cam_cci_convert_type_to_num_bytes(msg->data_type);
+		len = data_len + addr_len;
+		/* Room left in the payload for packed data bytes. */
+		pack_max_len = size < (cci_dev->payload_size-len) ?
+			size : (cci_dev->payload_size-len);
+		for (i = 0; i < pack_max_len;) {
+			if (cmd->delay || ((cmd - i2c_cmd) >= (cmd_size - 1)))
+				break;
+			if (cmd->reg_addr + 1 ==
+				(cmd+1)->reg_addr) {
+				len += data_len;
+				if (len > cci_dev->payload_size) {
+					len = len - data_len;
+					break;
+				}
+				(*pack)++;
+			} else {
+				break;
+			}
+			i += data_len;
+			cmd++;
+		}
+	}
+
+	if (len > cci_dev->payload_size) {
+		CAM_ERR(CAM_CCI, "Len error: %d", len);
+		return -EINVAL;
+	}
+
+	len += 1; /*add i2c WR command*/
+	len = len/4 + 1;
+
+	return len;
+}
+
+/*
+ * cam_cci_cycles_per_ms - convert a clock rate to timer cycles.
+ *
+ * NOTE: despite the name, the value computed is cycles per *microsecond*
+ * (the result feeds cci_dev->cycles_per_us and the fallback constant is
+ * CYCLES_PER_MICRO_SEC_DEFAULT).  A zero clock falls back to the default.
+ */
+static uint32_t cam_cci_cycles_per_ms(unsigned long clk)
+{
+	if (!clk) {
+		CAM_ERR(CAM_CCI, "failed: Can use default: %d",
+			CYCLES_PER_MICRO_SEC_DEFAULT);
+		return CYCLES_PER_MICRO_SEC_DEFAULT;
+	}
+
+	/* (clk / 1000) * 256 / 1000 == clk scaled to cycles per us. */
+	return ((clk / 1000) * 256) / 1000;
+}
+
+/*
+ * cam_cci_get_clk_rates - derive cycles_per_us and the clock vote level
+ * for the I2C frequency mode requested in @c_ctrl.
+ *
+ * Invalid mode: clk_level_index is set to -1 and nothing else changes.
+ * No source clock (src_clk_idx < 0): default cycle count, level 0.
+ * cci_clk_src of 0: use the level-0 rate of the source clock.
+ * Otherwise: search the vote levels for a rate matching cci_clk_src;
+ * if none matches, cci_dev is left unmodified.
+ */
+void cam_cci_get_clk_rates(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t src_idx;
+	int32_t level;
+	unsigned long rate;
+	uint32_t target_src;
+	struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
+	enum i2c_freq_mode mode = c_ctrl->cci_info->i2c_freq_mode;
+
+	if (mode < I2C_STANDARD_MODE || mode >= I2C_MAX_MODES) {
+		CAM_ERR(CAM_CCI, "Invalid frequency mode: %d",
+			(int32_t)mode);
+		cci_dev->clk_level_index = -1;
+		return;
+	}
+
+	target_src = cci_dev->cci_clk_params[mode].cci_clk_src;
+	src_idx = soc_info->src_clk_idx;
+
+	if (src_idx < 0) {
+		/* No source clock available: fall back to defaults. */
+		cci_dev->cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
+		cci_dev->clk_level_index = 0;
+		return;
+	}
+
+	if (target_src == 0) {
+		rate = soc_info->clk_rate[0][src_idx];
+		cci_dev->cycles_per_us = cam_cci_cycles_per_ms(rate);
+		cci_dev->clk_level_index = 0;
+		return;
+	}
+
+	for (level = 0; level < CAM_MAX_VOTE; level++) {
+		rate = soc_info->clk_rate[level][src_idx];
+		if (rate == target_src) {
+			cci_dev->cycles_per_us = cam_cci_cycles_per_ms(rate);
+			cci_dev->clk_level_index = level;
+			return;
+		}
+	}
+}
+
+/*
+ * cam_cci_set_clk_param - program the I2C bus timing registers for the
+ * master addressed by @c_ctrl.
+ *
+ * Writes the SCL high/low, SDA setup/hold, buffer and misc (stretch/
+ * hold/spike-filter) timing values for the requested i2c_freq_mode.
+ * If the master is already configured for this mode the write sequence
+ * is skipped.  The configured mode is cached in i2c_freq_mode[master].
+ *
+ * Return: 0 on success, -EINVAL on an out-of-range frequency mode.
+ */
+static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	struct cam_cci_clk_params_t *clk_params = NULL;
+	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+	enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+
+	if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
+		CAM_ERR(CAM_CCI, "invalid i2c_freq_mode = %d", i2c_freq_mode);
+		return -EINVAL;
+	}
+
+	clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
+
+	/* Already programmed for this mode: nothing to do. */
+	if (cci_dev->i2c_freq_mode[master] == i2c_freq_mode)
+		return 0;
+	if (master == MASTER_0) {
+		cam_io_w_mb(clk_params->hw_thigh << 16 |
+			clk_params->hw_tlow,
+			base + CCI_I2C_M0_SCL_CTL_ADDR);
+		cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
+			clk_params->hw_tsu_sta,
+			base + CCI_I2C_M0_SDA_CTL_0_ADDR);
+		cam_io_w_mb(clk_params->hw_thd_dat << 16 |
+			clk_params->hw_thd_sta,
+			base + CCI_I2C_M0_SDA_CTL_1_ADDR);
+		cam_io_w_mb(clk_params->hw_tbuf,
+			base + CCI_I2C_M0_SDA_CTL_2_ADDR);
+		cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
+			clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+			base + CCI_I2C_M0_MISC_CTL_ADDR);
+	} else if (master == MASTER_1) {
+		/* Same programming sequence, master-1 register bank. */
+		cam_io_w_mb(clk_params->hw_thigh << 16 |
+			clk_params->hw_tlow,
+			base + CCI_I2C_M1_SCL_CTL_ADDR);
+		cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
+			clk_params->hw_tsu_sta,
+			base + CCI_I2C_M1_SDA_CTL_0_ADDR);
+		cam_io_w_mb(clk_params->hw_thd_dat << 16 |
+			clk_params->hw_thd_sta,
+			base + CCI_I2C_M1_SDA_CTL_1_ADDR);
+		cam_io_w_mb(clk_params->hw_tbuf,
+			base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+		cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
+			clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+			base + CCI_I2C_M1_MISC_CTL_ADDR);
+	}
+	cci_dev->i2c_freq_mode[master] = i2c_freq_mode;
+
+	return 0;
+}
+
+/*
+ * cam_cci_data_queue - load an I2C write transaction into a CCI queue
+ * and execute it.
+ *
+ * Builds SET_PARAM / (optional WAIT_SYNC) / LOCK commands, then packs
+ * the register writes into CCI command words, draining the queue at the
+ * half-full and full watermarks, and finally ends the transfer with
+ * UNLOCK + REPORT via cam_cci_transfer_end().
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+	enum cci_i2c_sync sync_en)
+{
+	uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
+	int32_t rc = 0, free_size = 0, en_seq_write = 0;
+	uint8_t data[12];
+	struct cam_sensor_i2c_reg_setting *i2c_msg =
+		&c_ctrl->cfg.cci_i2c_write_cfg;
+	struct cam_sensor_i2c_reg_array *i2c_cmd = i2c_msg->reg_setting;
+	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+	uint16_t reg_addr = 0, cmd_size = i2c_msg->size;
+	uint32_t read_val = 0, reg_offset, val, delay = 0;
+	uint32_t max_queue_size, queue_size = 0, cmd = 0;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+	unsigned long flags;
+
+	if (i2c_cmd == NULL) {
+		CAM_ERR(CAM_CCI, "Failed: i2c cmd is NULL");
+		return -EINVAL;
+	}
+
+	if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) {
+		CAM_ERR(CAM_CCI, "failed: invalid cmd_size %d",
+			cmd_size);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_CCI, "addr type %d data type %d cmd_size %d",
+		i2c_msg->addr_type, i2c_msg->data_type, cmd_size);
+
+	if (i2c_msg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_CCI, "failed: invalid addr_type 0x%X",
+			i2c_msg->addr_type);
+		return -EINVAL;
+	}
+	if (i2c_msg->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_CCI, "failed: invalid data_type 0x%X",
+			i2c_msg->data_type);
+		return -EINVAL;
+	}
+	reg_offset = master * 0x200 + queue * 0x100;
+
+	cam_io_w_mb(cci_dev->cci_wait_sync_cfg.cid,
+		base + CCI_SET_CID_SYNC_TIMER_ADDR +
+		cci_dev->cci_wait_sync_cfg.csid *
+		CCI_SET_CID_SYNC_TIMER_OFFSET);
+
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x",
+		CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset, val);
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+
+	spin_lock_irqsave(&cci_dev->cci_master_info[master].lock_q[queue],
+		flags);
+	atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0);
+	spin_unlock_irqrestore(&cci_dev->cci_master_info[master].lock_q[queue],
+		flags);
+
+	max_queue_size =
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size;
+
+	/* SEQ writes may fill the whole queue; others drain at half. */
+	if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ)
+		queue_size = max_queue_size;
+	else
+		queue_size = max_queue_size/2;
+	reg_addr = i2c_cmd->reg_addr;
+
+	if (sync_en == MSM_SYNC_ENABLE && cci_dev->valid_sync &&
+		cmd_size < max_queue_size) {
+		val = CCI_I2C_WAIT_SYNC_CMD |
+			((cci_dev->cci_wait_sync_cfg.line) << 4);
+		cam_io_w_mb(val,
+			base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+			reg_offset);
+	}
+
+	rc = cam_cci_lock_queue(cci_dev, master, queue, 1);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "failed line %d", rc);
+		return rc;
+	}
+
+	while (cmd_size) {
+		uint32_t pack = 0;
+
+		len = cam_cci_calc_cmd_len(cci_dev, c_ctrl, cmd_size,
+			i2c_cmd, &pack);
+		/*
+		 * NOTE(review): len is uint16_t, so a negative error from
+		 * cam_cci_calc_cmd_len() wraps to a large positive value and
+		 * this check only catches len == 0 - TODO: use a signed
+		 * temporary to make error returns visible.
+		 */
+		if (len <= 0) {
+			CAM_ERR(CAM_CCI, "failed");
+			return -EINVAL;
+		}
+
+		read_val = cam_io_r_mb(base +
+			CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+		CAM_DBG(CAM_CCI, "CUR_WORD_CNT_ADDR %d len %d max %d",
+			read_val, len, max_queue_size);
+		/* + 1 - space allocation for Report CMD */
+		if ((read_val + len + 1) > queue_size) {
+			if ((read_val + len + 1) > max_queue_size) {
+				rc = cam_cci_process_full_q(cci_dev,
+					master, queue);
+				if (rc < 0) {
+					CAM_ERR(CAM_CCI, "failed rc: %d", rc);
+					return rc;
+				}
+				continue;
+			}
+			cam_cci_process_half_q(cci_dev, master, queue);
+		}
+
+		CAM_DBG(CAM_CCI, "cmd_size %d addr 0x%x data 0x%x",
+			cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
+		delay = i2c_cmd->delay;
+		i = 0;
+		data[i++] = CCI_I2C_WRITE_CMD;
+
+		/*
+		 * in case of multiple command
+		 * MSM_CCI_I2C_WRITE : address is not continuous, so update
+		 *	address for a new packet.
+		 * MSM_CCI_I2C_WRITE_SEQ : address is continuous, need to keep
+		 *	the incremented address for a
+		 *	new packet
+		 */
+		if (c_ctrl->cmd == MSM_CCI_I2C_WRITE ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_ASYNC ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_SYNC ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_SYNC_BLOCK)
+			reg_addr = i2c_cmd->reg_addr;
+
+		if (en_seq_write == 0) {
+			/* either byte or word addr */
+			if (i2c_msg->addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+				data[i++] = reg_addr;
+			else {
+				data[i++] = (reg_addr & 0xFF00) >> 8;
+				data[i++] = reg_addr & 0x00FF;
+			}
+		}
+		/* max of 10 data bytes */
+		do {
+			if (i2c_msg->data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+				data[i++] = i2c_cmd->reg_data;
+				if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ)
+					reg_addr++;
+			} else {
+				if ((i + 1) <= cci_dev->payload_size) {
+					/* big-endian byte order, widest first */
+					switch (i2c_msg->data_type) {
+					case CAMERA_SENSOR_I2C_TYPE_DWORD:
+						data[i++] = (i2c_cmd->reg_data &
+							0xFF000000) >> 24;
+						/* fallthrough */
+					case CAMERA_SENSOR_I2C_TYPE_3B:
+						data[i++] = (i2c_cmd->reg_data &
+							0x00FF0000) >> 16;
+						/* fallthrough */
+					case CAMERA_SENSOR_I2C_TYPE_WORD:
+						data[i++] = (i2c_cmd->reg_data &
+							0x0000FF00) >> 8;
+						/* fallthrough */
+					case CAMERA_SENSOR_I2C_TYPE_BYTE:
+						data[i++] = i2c_cmd->reg_data &
+							0x000000FF;
+						break;
+					default:
+						CAM_ERR(CAM_CCI,
+							"invalid data type: %d",
+							i2c_msg->data_type);
+						return -EINVAL;
+					}
+
+					if (c_ctrl->cmd ==
+						MSM_CCI_I2C_WRITE_SEQ)
+						reg_addr++;
+				} else
+					break;
+			}
+			i2c_cmd++;
+			--cmd_size;
+		} while (((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST) || pack--) &&
+			(cmd_size > 0) && (i <= cci_dev->payload_size));
+		free_size = cam_cci_get_queue_free_size(cci_dev, master,
+			queue);
+		/* 0xF0 marks an open-ended (burst) write command. */
+		if ((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST) &&
+			((i-1) == MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11) &&
+			cci_dev->support_seq_write && cmd_size > 0 &&
+			free_size > BURST_MIN_FREE_SIZE) {
+			data[0] |= 0xF0;
+			en_seq_write = 1;
+		} else {
+			data[0] |= ((i-1) << 4);
+			en_seq_write = 0;
+		}
+		len = ((i-1)/4) + 1;
+
+		read_val = cam_io_r_mb(base +
+			CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+		/* Pack the byte stream into 32-bit little-endian words. */
+		for (h = 0, k = 0; h < len; h++) {
+			cmd = 0;
+			for (j = 0; (j < 4 && k < i); j++)
+				cmd |= (data[k++] << (j * 8));
+			CAM_DBG(CAM_CCI,
+				"LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d",
+				cmd, queue, len, read_val);
+			cam_io_w_mb(cmd, base +
+				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+				master * 0x200 + queue * 0x100);
+
+			read_val += 1;
+			cam_io_w_mb(read_val, base +
+				CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		}
+
+		/* Append a WAIT command for a per-entry delay, if any. */
+		if ((delay > 0) && (delay < CCI_MAX_DELAY) &&
+			en_seq_write == 0) {
+			cmd = (uint32_t)((delay * cci_dev->cycles_per_us) /
+				0x100);
+			cmd <<= 4;
+			cmd |= CCI_I2C_WAIT_CMD;
+			CAM_DBG(CAM_CCI,
+				"CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x", cmd);
+			cam_io_w_mb(cmd, base +
+				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+				master * 0x200 + queue * 0x100);
+			read_val += 1;
+			cam_io_w_mb(read_val, base +
+				CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		}
+	}
+
+	rc = cam_cci_transfer_end(cci_dev, master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "failed rc %d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cci_burst_read - read num_byte bytes using the read-threshold IRQ,
+ * draining the FIFO in chunks until all expected words have arrived.
+ *
+ * Loads SET_PARAM / LOCK / WRITE_DISABLE_P (address) / READ / UNLOCK
+ * into QUEUE_1, starts the queue, then repeatedly waits on th_complete
+ * and drains CCI_I2C_M0_READ_BUF_LEVEL words.  The first byte read back
+ * is the slave id and is discarded.  A final wait on reset_complete
+ * compensates for the RD_DONE completion.
+ *
+ * Fix: the "More than max retries" and "Data ptr is NULL" bail-outs
+ * previously jumped to rel_mutex with rc still 0, returning success on
+ * invalid input; they now return -EINVAL.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0, i = 0, j = 0, irq_mask_update = 0;
+	unsigned long rem_jiffies, flags;
+	int32_t read_words = 0, exp_words = 0;
+	int32_t index = 0, first_byte = 0, total_read_words = 0;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_1;
+	struct cci_device *cci_dev = NULL;
+	struct cam_cci_read_cfg *read_cfg = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	void __iomem *base = NULL;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+		return -EINVAL;
+	}
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+	mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+	/*
+	 * Todo: If there is a change in frequency of operation
+	 * Wait for previous transaction to complete
+	 */
+
+	/* Set the I2C Frequency */
+	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
+		goto rel_mutex;
+	}
+
+	/*
+	 * Call validate queue to make sure queue is empty before starting.
+	 * If this call fails, don't proceed with i2c_read call. This is to
+	 * avoid overflow / underflow of queue
+	 */
+	rc = cam_cci_validate_queue(cci_dev,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
+		master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Initial validataion failed rc %d", rc);
+		goto rel_mutex;
+	}
+
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		CAM_ERR(CAM_CCI, "More than max retries");
+		rc = -EINVAL;	/* fix: was falling through with rc == 0 */
+		goto rel_mutex;
+	}
+
+	if (read_cfg->data == NULL) {
+		CAM_ERR(CAM_CCI, "Data ptr is NULL");
+		rc = -EINVAL;	/* fix: was falling through with rc == 0 */
+		goto rel_mutex;
+	}
+
+	if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
+			read_cfg->addr_type);
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
+		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+		c_ctrl->cci_info->id_map);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	/* Load the register address bytes to write before the read. */
+	val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
+	for (i = 0; i < read_cfg->addr_type; i++) {
+		val |= ((read_cfg->addr >> (i << 3)) & 0xFF) <<
+		((read_cfg->addr_type - i) << 3);
+	}
+
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_UNLOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+	CAM_DBG(CAM_CCI, "cur word cnt 0x%x", val);
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
+
+	/* Each FIFO word holds 4 bytes; +1 covers the leading sid byte. */
+	exp_words = ((read_cfg->num_byte / 4) + 1);
+	CAM_DBG(CAM_CCI, "waiting for threshold [exp_words %d]", exp_words);
+
+	while (total_read_words != exp_words) {
+		rem_jiffies = wait_for_completion_timeout(
+			&cci_dev->cci_master_info[master].th_complete,
+			CCI_TIMEOUT);
+		if (!rem_jiffies) {
+			rc = -ETIMEDOUT;
+			val = cam_io_r_mb(base +
+				CCI_I2C_M0_READ_BUF_LEVEL_ADDR +
+				master * 0x100);
+			CAM_ERR(CAM_CCI,
+				"wait_for_completion_timeout rc = %d FIFO buf_lvl:0x%x",
+				rc, val);
+#ifdef DUMP_CCI_REGISTERS
+			cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+			cam_cci_flush_queue(cci_dev, master);
+			goto rel_mutex;
+		}
+
+		read_words = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+		if (read_words <= 0) {
+			CAM_DBG(CAM_CCI, "FIFO Buffer lvl is 0");
+			continue;
+		}
+
+		j++;
+		CAM_DBG(CAM_CCI, "Iteration: %u read_words %d", j, read_words);
+
+		total_read_words += read_words;
+		/* Drain the FIFO; skip the first (slave id) byte. */
+		while (read_words > 0) {
+			val = cam_io_r_mb(base +
+				CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+			for (i = 0; (i < 4) &&
+				(index < read_cfg->num_byte); i++) {
+				CAM_DBG(CAM_CCI, "i:%d index:%d", i, index);
+				if (!first_byte) {
+					CAM_DBG(CAM_CCI, "sid 0x%x",
+						val & 0xFF);
+					first_byte++;
+				} else {
+					read_cfg->data[index] =
+						(val >> (i * 8)) & 0xFF;
+					CAM_DBG(CAM_CCI, "data[%d] 0x%x", index,
+						read_cfg->data[index]);
+					index++;
+				}
+			}
+			read_words--;
+		}
+
+		CAM_DBG(CAM_CCI, "Iteraion:%u total_read_words %d",
+			j, total_read_words);
+
+		/* Re-enable the RD_THRESHOLD irq if the handler masked it. */
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
+		if (cci_dev->irqs_disabled) {
+			irq_mask_update =
+				cam_io_r_mb(base + CCI_IRQ_MASK_1_ADDR) |
+				CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+			if (master == MASTER_0 && cci_dev->irqs_disabled &
+				CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)
+				irq_mask_update |=
+					CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+			else if (master == MASTER_1 && cci_dev->irqs_disabled &
+				CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)
+				irq_mask_update |=
+					CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+			cam_io_w_mb(irq_mask_update,
+				base + CCI_IRQ_MASK_1_ADDR);
+		}
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+
+		if (total_read_words == exp_words) {
+			/*
+			 * This wait is for RD_DONE irq, if RD_DONE is
+			 * triggered we will call complete on both threshold
+			 * & read done waits. As part of the threshold wait
+			 * we will be draining the entire buffer out. This
+			 * wait is to compensate for the complete invoked for
+			 * RD_DONE exclusively.
+			 */
+			rem_jiffies = wait_for_completion_timeout(
+				&cci_dev->cci_master_info[master].reset_complete,
+				CCI_TIMEOUT);
+			if (!rem_jiffies) {
+				rc = -ETIMEDOUT;
+				val = cam_io_r_mb(base +
+					CCI_I2C_M0_READ_BUF_LEVEL_ADDR +
+					master * 0x100);
+				CAM_ERR(CAM_CCI,
+					"Failed to receive RD_DONE irq rc = %d FIFO buf_lvl:0x%x",
+					rc, val);
+				#ifdef DUMP_CCI_REGISTERS
+					cam_cci_dump_registers(cci_dev,
+						master, queue);
+				#endif
+				cam_cci_flush_queue(cci_dev, master);
+				goto rel_mutex;
+			}
+			break;
+		}
+	}
+
+	CAM_DBG(CAM_CCI, "Burst read successful words_read %d",
+		total_read_words);
+
+rel_mutex:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+	return rc;
+}
+
+/*
+ * cam_cci_read - single-shot read of num_byte bytes (FIFO fits in one
+ * RD_DONE window; see cam_cci_burst_read for larger transfers).
+ *
+ * Loads SET_PARAM / LOCK / WRITE_DISABLE_P (address) / READ / UNLOCK
+ * into QUEUE_1, starts the queue, waits once on reset_complete and then
+ * drains the read FIFO.  The first byte read back is the slave id and
+ * is discarded.
+ *
+ * Fix: the "More than max retries" and "Data ptr is NULL" bail-outs
+ * previously jumped to rel_mutex with rc still 0, returning success on
+ * invalid input; they now return -EINVAL.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int32_t cam_cci_read(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+	int32_t read_words = 0, exp_words = 0;
+	int32_t index = 0, first_byte = 0;
+	uint32_t i = 0;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_1;
+	struct cci_device *cci_dev = NULL;
+	struct cam_cci_read_cfg *read_cfg = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	void __iomem *base = NULL;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+		return -EINVAL;
+	}
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+
+	mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+	/*
+	 * Todo: If there is a change in frequency of operation
+	 * Wait for previous transaction to complete
+	 */
+
+	/* Set the I2C Frequency */
+	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
+		goto rel_mutex;
+	}
+
+	/*
+	 * Call validate queue to make sure queue is empty before starting.
+	 * If this call fails, don't proceed with i2c_read call. This is to
+	 * avoid overflow / underflow of queue
+	 */
+	rc = cam_cci_validate_queue(cci_dev,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
+		master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Initial validataion failed rc %d", rc);
+		goto rel_mutex;
+	}
+
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		CAM_ERR(CAM_CCI, "More than max retries");
+		rc = -EINVAL;	/* fix: was falling through with rc == 0 */
+		goto rel_mutex;
+	}
+
+	if (read_cfg->data == NULL) {
+		CAM_ERR(CAM_CCI, "Data ptr is NULL");
+		rc = -EINVAL;	/* fix: was falling through with rc == 0 */
+		goto rel_mutex;
+	}
+
+	CAM_DBG(CAM_CCI, "master %d, queue %d", master, queue);
+	CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
+		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+		c_ctrl->cci_info->id_map);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
+			read_cfg->addr_type);
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	/* Load the register address bytes to write before the read. */
+	val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
+	for (i = 0; i < read_cfg->addr_type; i++) {
+		val |= ((read_cfg->addr >> (i << 3)) & 0xFF) <<
+		((read_cfg->addr_type - i) << 3);
+	}
+
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_UNLOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+	CAM_DBG(CAM_CCI, "cur word cnt 0x%x", val);
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
+	CAM_DBG(CAM_CCI,
+		"waiting_for_rd_done [exp_words: %d]", exp_words);
+
+	rc = wait_for_completion_timeout(
+		&cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
+	if (rc <= 0) {
+#ifdef DUMP_CCI_REGISTERS
+		cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+		val = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+		CAM_ERR(CAM_CCI,
+			"wait_for_completion_timeout rc = %d FIFO buf_lvl: 0x%x",
+			rc, val);
+		cam_cci_flush_queue(cci_dev, master);
+		goto rel_mutex;
+	} else {
+		rc = 0;
+	}
+
+	read_words = cam_io_r_mb(base +
+		CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+	/* Each FIFO word holds 4 bytes; +1 covers the leading sid byte. */
+	exp_words = ((read_cfg->num_byte / 4) + 1);
+	if (read_words != exp_words) {
+		CAM_ERR(CAM_CCI, "read_words = %d, exp words = %d",
+			read_words, exp_words);
+		memset(read_cfg->data, 0, read_cfg->num_byte);
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+	index = 0;
+	CAM_DBG(CAM_CCI, "index %d num_type %d", index, read_cfg->num_byte);
+	first_byte = 0;
+	/* Drain the FIFO; skip the first (slave id) byte. */
+	while (read_words > 0) {
+		val = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+		CAM_DBG(CAM_CCI, "read val 0x%x", val);
+		for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
+			CAM_DBG(CAM_CCI, "i:%d index:%d", i, index);
+			if (!first_byte) {
+				CAM_DBG(CAM_CCI, "sid 0x%x", val & 0xFF);
+				first_byte++;
+			} else {
+				read_cfg->data[index] =
+					(val >> (i * 8)) & 0xFF;
+				CAM_DBG(CAM_CCI, "data[%d] 0x%x", index,
+					read_cfg->data[index]);
+				index++;
+			}
+		}
+		read_words--;
+	}
+rel_mutex:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+	return rc;
+}
+
+/*
+ * cam_cci_i2c_write - validate state, program the bus clock, confirm the
+ * queue is empty and hand the transaction to cam_cci_data_queue().
+ *
+ * Fix: the retries-exceeded path previously did "return rc" while rc was
+ * still 0, so an invalid request was reported as success; it now returns
+ * -EINVAL.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int32_t cam_cci_i2c_write(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+	enum cci_i2c_sync sync_en)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+	enum cci_i2c_master_t master;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	if (cci_dev->cci_state != CCI_STATE_ENABLED) {
+		CAM_ERR(CAM_CCI, "invalid cci state %d",
+			cci_dev->cci_state);
+		return -EINVAL;
+	}
+	master = c_ctrl->cci_info->cci_i2c_master;
+	CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
+		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+		c_ctrl->cci_info->id_map);
+
+	/* Set the I2C Frequency */
+	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
+		return rc;
+	}
+	/*
+	 * Call validate queue to make sure queue is empty before starting.
+	 * If this call fails, don't proceed with i2c_write call. This is to
+	 * avoid overflow / underflow of queue
+	 */
+	rc = cam_cci_validate_queue(cci_dev,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size-1,
+		master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Initial validataion failed rc %d",
+			rc);
+		return rc;
+	}
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		CAM_ERR(CAM_CCI, "More than max retries");
+		return -EINVAL;	/* fix: was "return rc" with rc == 0 */
+	}
+	rc = cam_cci_data_queue(cci_dev, c_ctrl, queue, sync_en);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "failed rc: %d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cci_write_async_helper - workqueue body for asynchronous writes.
+ *
+ * Runs the queued cam_cci_i2c_write() under the per-queue mutex, then
+ * frees both the deep-copied reg_setting array and the cci_write_async
+ * container allocated by cam_cci_i2c_write_async() - this function owns
+ * and releases that memory.  A write failure is only logged; there is no
+ * way to report it back to the original caller.
+ */
+static void cam_cci_write_async_helper(struct work_struct *work)
+{
+	int rc;
+	struct cci_device *cci_dev;
+	struct cci_write_async *write_async =
+		container_of(work, struct cci_write_async, work);
+	struct cam_sensor_i2c_reg_setting *i2c_msg;
+	enum cci_i2c_master_t master;
+	struct cam_cci_master_info *cci_master_info;
+
+	cci_dev = write_async->cci_dev;
+	i2c_msg = &write_async->c_ctrl.cfg.cci_i2c_write_cfg;
+	master = write_async->c_ctrl.cci_info->cci_i2c_master;
+	cci_master_info = &cci_dev->cci_master_info[master];
+
+	mutex_lock(&cci_master_info->mutex_q[write_async->queue]);
+	rc = cam_cci_i2c_write(&(cci_dev->v4l2_dev_str.sd),
+		&write_async->c_ctrl, write_async->queue, write_async->sync_en);
+	mutex_unlock(&cci_master_info->mutex_q[write_async->queue]);
+	if (rc < 0)
+		CAM_ERR(CAM_CCI, "failed rc: %d", rc);
+
+	/* Release the deep copy made by cam_cci_i2c_write_async(). */
+	kfree(write_async->c_ctrl.cfg.cci_i2c_write_cfg.reg_setting);
+	kfree(write_async);
+}
+
+/*
+ * cam_cci_i2c_write_async - queue an I2C write to run on the CCI write
+ * workqueue.
+ *
+ * Deep-copies @c_ctrl (including the reg_setting array) into a freshly
+ * allocated cci_write_async so the caller's buffers may be released
+ * immediately; cam_cci_write_async_helper() frees the copy when the
+ * work item completes.
+ *
+ * Fix: removed a duplicated "addr_type" assignment.
+ *
+ * Return: 0 on success, -EINVAL on empty write config, -ENOMEM on
+ * allocation failure.
+ */
+static int32_t cam_cci_i2c_write_async(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+	enum cci_i2c_sync sync_en)
+{
+	int32_t rc = 0;
+	struct cci_write_async *write_async;
+	struct cci_device *cci_dev;
+	struct cam_sensor_i2c_reg_setting *cci_i2c_write_cfg;
+	struct cam_sensor_i2c_reg_setting *cci_i2c_write_cfg_w;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	write_async = kzalloc(sizeof(*write_async), GFP_KERNEL);
+	if (!write_async)
+		return -ENOMEM;
+
+
+	INIT_WORK(&write_async->work, cam_cci_write_async_helper);
+	write_async->cci_dev = cci_dev;
+	write_async->c_ctrl = *c_ctrl;
+	write_async->queue = queue;
+	write_async->sync_en = sync_en;
+
+	cci_i2c_write_cfg = &c_ctrl->cfg.cci_i2c_write_cfg;
+	cci_i2c_write_cfg_w = &write_async->c_ctrl.cfg.cci_i2c_write_cfg;
+
+	if (cci_i2c_write_cfg->size == 0) {
+		kfree(write_async);
+		return -EINVAL;
+	}
+
+	/* Deep-copy the register array; freed by the helper. */
+	cci_i2c_write_cfg_w->reg_setting =
+		kzalloc(sizeof(struct cam_sensor_i2c_reg_array)*
+		cci_i2c_write_cfg->size, GFP_KERNEL);
+	if (!cci_i2c_write_cfg_w->reg_setting) {
+		CAM_ERR(CAM_CCI, "Couldn't allocate memory");
+		kfree(write_async);
+		return -ENOMEM;
+	}
+	memcpy(cci_i2c_write_cfg_w->reg_setting,
+		cci_i2c_write_cfg->reg_setting,
+		(sizeof(struct cam_sensor_i2c_reg_array)*
+		cci_i2c_write_cfg->size));
+
+	cci_i2c_write_cfg_w->addr_type = cci_i2c_write_cfg->addr_type;
+	cci_i2c_write_cfg_w->data_type = cci_i2c_write_cfg->data_type;
+	cci_i2c_write_cfg_w->size = cci_i2c_write_cfg->size;
+	cci_i2c_write_cfg_w->delay = cci_i2c_write_cfg->delay;
+
+	queue_work(cci_dev->write_wq[write_async->queue], &write_async->work);
+
+	return rc;
+}
+
+/**
+ * cam_cci_read_bytes - perform an I2C read, split into HW-sized chunks
+ * @sd:     CCI v4l2 sub device
+ * @c_ctrl: control block; cfg.cci_i2c_read_cfg holds addr/data/num_byte
+ *
+ * Reads larger than CCI_I2C_MAX_BYTE_COUNT are issued as multiple
+ * transactions; chunks of CCI_READ_MAX bytes or more take the burst-read
+ * path. Returns 0 on success or a negative error code.
+ */
+static int32_t cam_cci_read_bytes(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev = NULL;
+	enum cci_i2c_master_t master;
+	struct cam_cci_read_cfg *read_cfg = NULL;
+	uint16_t read_bytes = 0;
+
+	if (!sd || !c_ctrl) {
+		CAM_ERR(CAM_CCI, "sd %pK c_ctrl %pK", sd, c_ctrl);
+		return -EINVAL;
+	}
+	if (!c_ctrl->cci_info) {
+		CAM_ERR(CAM_CCI, "cci_info NULL");
+		return -EINVAL;
+	}
+	cci_dev = v4l2_get_subdevdata(sd);
+	if (!cci_dev) {
+		CAM_ERR(CAM_CCI, "cci_dev NULL");
+		return -EINVAL;
+	}
+	if (cci_dev->cci_state != CCI_STATE_ENABLED) {
+		CAM_ERR(CAM_CCI, "invalid cci state %d", cci_dev->cci_state);
+		return -EINVAL;
+	}
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+		return -EINVAL;
+	}
+
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+	/* NOTE(review): this also rejects num_byte > CCI_I2C_MAX_READ,
+	 * although the log text only mentions the zero case.
+	 */
+	if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) {
+		CAM_ERR(CAM_CCI, "read num bytes 0");
+		rc = -EINVAL;
+		goto ERROR;
+	}
+
+	read_bytes = read_cfg->num_byte;
+
+	/*
+	 * To avoid any conflicts due to back to back trigger of
+	 * THRESHOLD irq's, we reinit the threshold wait before
+	 * we load the burst read cmd.
+	 */
+	reinit_completion(&cci_dev->cci_master_info[master].th_complete);
+
+	CAM_DBG(CAM_CCI, "Bytes to read %u", read_bytes);
+	do {
+		/* Clamp each transaction to the HW byte-counter limit */
+		if (read_bytes >= CCI_I2C_MAX_BYTE_COUNT)
+			read_cfg->num_byte = CCI_I2C_MAX_BYTE_COUNT;
+		else
+			read_cfg->num_byte = read_bytes;
+
+		if (read_cfg->num_byte >= CCI_READ_MAX) {
+			cci_dev->is_burst_read = true;
+			rc = cam_cci_burst_read(sd, c_ctrl);
+		} else {
+			cci_dev->is_burst_read = false;
+			rc = cam_cci_read(sd, c_ctrl);
+		}
+		if (rc) {
+			CAM_ERR(CAM_CCI, "failed to read rc:%d", rc);
+			goto ERROR;
+		}
+
+		/* Advance slave address (in data_type-sized units) and the
+		 * destination buffer for the next chunk.
+		 */
+		if (read_bytes >= CCI_I2C_MAX_BYTE_COUNT) {
+			read_cfg->addr += (CCI_I2C_MAX_BYTE_COUNT /
+				read_cfg->data_type);
+			read_cfg->data += CCI_I2C_MAX_BYTE_COUNT;
+			read_bytes -= CCI_I2C_MAX_BYTE_COUNT;
+		} else {
+			read_bytes = 0;
+		}
+	} while (read_bytes);
+
+ERROR:
+	cci_dev->is_burst_read = false;
+	return rc;
+}
+
+/* Store the CSID sync parameters supplied by userspace and record
+ * whether they describe a usable sync source (csid >= 0).
+ */
+static int32_t cam_cci_i2c_set_sync_prms(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
+
+	if (!cci_dev || !c_ctrl) {
+		CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+			cci_dev, c_ctrl);
+		return -EINVAL;
+	}
+
+	cci_dev->cci_wait_sync_cfg = c_ctrl->cfg.cci_wait_sync_cfg;
+	cci_dev->valid_sync = (cci_dev->cci_wait_sync_cfg.csid >= 0) ? 1 : 0;
+
+	return 0;
+}
+
+/**
+ * cam_cci_release - release SOC resources held by the CCI device
+ * @sd: CCI v4l2 sub device
+ *
+ * Returns 0 on success or the negative error from cam_cci_soc_release().
+ */
+static int32_t cam_cci_release(struct v4l2_subdev *sd)
+{
+	/* Was uint8_t: negative error codes wrapped to positive values,
+	 * so the (rc < 0) check below could never fire.
+	 */
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	rc = cam_cci_soc_release(cci_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Failed in releasing the cci: %d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_cci_write - dispatch an I2C write on the appropriate queue
+ * @sd:     CCI v4l2 sub device
+ * @c_ctrl: control block describing the write and target master
+ *
+ * Synchronous-block writes serialize on SYNC_QUEUE; plain writes grab the
+ * first free queue via trylock, falling back to blocking on the priority
+ * queue. Returns 0 on success or a negative error code.
+ */
+static int32_t cam_cci_write(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+	enum cci_i2c_master_t master;
+	struct cam_cci_master_info *cci_master_info;
+	uint32_t i;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	if (!cci_dev || !c_ctrl) {
+		CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+			cci_dev, c_ctrl);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	master = c_ctrl->cci_info->cci_i2c_master;
+
+	/* master is only used to index below after this range check */
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+		return -EINVAL;
+	}
+
+	cci_master_info = &cci_dev->cci_master_info[master];
+
+	switch (c_ctrl->cmd) {
+	case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
+		/* Blocking synchronised write on the sync queue */
+		mutex_lock(&cci_master_info->mutex_q[SYNC_QUEUE]);
+		rc = cam_cci_i2c_write(sd, c_ctrl,
+			SYNC_QUEUE, MSM_SYNC_ENABLE);
+		mutex_unlock(&cci_master_info->mutex_q[SYNC_QUEUE]);
+		break;
+	case MSM_CCI_I2C_WRITE_SYNC:
+		/* Non-blocking synchronised write, executed from workqueue */
+		rc = cam_cci_i2c_write_async(sd, c_ctrl,
+			SYNC_QUEUE, MSM_SYNC_ENABLE);
+		break;
+	case MSM_CCI_I2C_WRITE:
+	case MSM_CCI_I2C_WRITE_SEQ:
+	case MSM_CCI_I2C_WRITE_BURST:
+		/* Use the first queue that is currently free ... */
+		for (i = 0; i < NUM_QUEUES; i++) {
+			if (mutex_trylock(&cci_master_info->mutex_q[i])) {
+				rc = cam_cci_i2c_write(sd, c_ctrl, i,
+					MSM_SYNC_DISABLE);
+				mutex_unlock(&cci_master_info->mutex_q[i]);
+				return rc;
+			}
+		}
+		/* ... otherwise block on the priority queue */
+		mutex_lock(&cci_master_info->mutex_q[PRIORITY_QUEUE]);
+		rc = cam_cci_i2c_write(sd, c_ctrl,
+			PRIORITY_QUEUE, MSM_SYNC_DISABLE);
+		mutex_unlock(&cci_master_info->mutex_q[PRIORITY_QUEUE]);
+		break;
+	case MSM_CCI_I2C_WRITE_ASYNC:
+		rc = cam_cci_i2c_write_async(sd, c_ctrl,
+			PRIORITY_QUEUE, MSM_SYNC_DISABLE);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_cci_core_cfg - dispatch a CCI control command to its handler
+ * @sd:       CCI v4l2 sub device
+ * @cci_ctrl: command block; its status field mirrors the return code
+ *
+ * Returns 0 on success, -ENOIOCTLCMD for unknown commands, or the
+ * handler's negative error code.
+ */
+int32_t cam_cci_core_cfg(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *cci_ctrl)
+{
+	int32_t rc;
+
+	CAM_DBG(CAM_CCI, "cmd %d", cci_ctrl->cmd);
+
+	switch (cci_ctrl->cmd) {
+	case MSM_CCI_INIT:
+		rc = cam_cci_init(sd, cci_ctrl);
+		break;
+	case MSM_CCI_RELEASE:
+		rc = cam_cci_release(sd);
+		break;
+	case MSM_CCI_I2C_READ:
+		rc = cam_cci_read_bytes(sd, cci_ctrl);
+		break;
+	case MSM_CCI_I2C_WRITE:
+	case MSM_CCI_I2C_WRITE_SEQ:
+	case MSM_CCI_I2C_WRITE_BURST:
+	case MSM_CCI_I2C_WRITE_SYNC:
+	case MSM_CCI_I2C_WRITE_ASYNC:
+	case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
+		rc = cam_cci_write(sd, cci_ctrl);
+		break;
+	case MSM_CCI_GPIO_WRITE:
+		/* Accepted but not implemented */
+		rc = 0;
+		break;
+	case MSM_CCI_SET_SYNC_CID:
+		rc = cam_cci_i2c_set_sync_prms(sd, cci_ctrl);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+
+	cci_ctrl->status = rc;
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.h
new file mode 100644
index 000000000000..18b2ec7d0cda
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_CCI_CORE_H_
+#define _CAM_CCI_CORE_H_
+
+#include <linux/irqreturn.h>
+#include <media/cam_sensor.h>
+#include "cam_cci_dev.h"
+#include "cam_cci_soc.h"
+
+/**
+ * cam_cci_get_clk_rates - compute/select CCI clock rates
+ * @cci_dev: CCI device structure
+ * @c_ctrl:  CCI control structure
+ *
+ * This API gets CCI clk rates
+ */
+void cam_cci_get_clk_rates(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl);
+
+/**
+ * cam_cci_core_cfg - dispatch a CCI control command
+ * @sd:       V4L2 sub device
+ * @cci_ctrl: CCI control structure
+ *
+ * This API handles I2C operations for CCI
+ */
+int32_t cam_cci_core_cfg(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *cci_ctrl);
+
+/**
+ * cam_cci_irq - CCI interrupt handler
+ * @irq_num: IRQ number
+ * @data:    CCI private structure
+ *
+ * This API handles CCI IRQs
+ */
+irqreturn_t cam_cci_irq(int irq_num, void *data);
+
+#endif /* _CAM_CCI_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.c
new file mode 100644
index 000000000000..5e522a8a5959
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -0,0 +1,505 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_cci_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_cci_soc.h"
+#include "cam_cci_core.h"
+
+#define CCI_MAX_DELAY 1000000
+
+static struct v4l2_subdev *g_cci_subdev[MAX_CCI];
+
+/**
+ * cam_cci_get_subdev - look up a registered CCI subdev by index
+ * @cci_dev_index: index into the global subdev table
+ *
+ * Returns the subdev pointer, or NULL for an out-of-range index.
+ */
+struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index)
+{
+	/* Also reject negative indices; the original only bounded the top */
+	if (cci_dev_index >= 0 && cci_dev_index < MAX_CCI)
+		return g_cci_subdev[cci_dev_index];
+	return NULL;
+}
+
+/**
+ * cam_cci_subdev_ioctl - subdev ioctl handler for CCI commands
+ * @sd:  CCI v4l2 sub device
+ * @cmd: ioctl command
+ * @arg: command payload (struct cam_cci_ctrl for VIDIOC_MSM_CCI_CFG)
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static long cam_cci_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc = 0;
+
+	if (arg == NULL) {
+		CAM_ERR(CAM_CCI, "Invalid Args");
+		/* Was "return rc" (0): reported an error but signalled
+		 * success to the caller.
+		 */
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case VIDIOC_MSM_CCI_CFG:
+		rc = cam_cci_core_cfg(sd, arg);
+		break;
+	case VIDIOC_CAM_CONTROL:
+		break;
+	default:
+		CAM_ERR(CAM_CCI, "Invalid ioctl cmd: %d", cmd);
+		rc = -ENOIOCTLCMD;
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit compat ioctl entry point.
+ * NOTE(review): @arg is dropped and NULL is forwarded, so compat callers
+ * can never pass a payload — confirm this is intentional hardening and
+ * not a missing compat pointer conversion.
+ */
+static long cam_cci_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	return cam_cci_subdev_ioctl(sd, cmd, NULL);
+}
+#endif
+
+/**
+ * cam_cci_irq - CCI interrupt handler
+ * @irq_num: IRQ number (unused; both masters share this handler)
+ * @data:    struct cci_device pointer registered with the IRQ
+ *
+ * Decodes IRQ_STATUS_0/1, completes the waiters for reset, read-done,
+ * read-threshold and queue-report events on both masters, and manages
+ * dynamic masking of the RD_THRESHOLD irq during burst reads.
+ */
+irqreturn_t cam_cci_irq(int irq_num, void *data)
+{
+	uint32_t irq_status0, irq_status1, reg_bmsk;
+	uint32_t irq_update_rd_done = 0;
+	struct cci_device *cci_dev = data;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+	unsigned long flags;
+	bool rd_done_th_assert = false;
+
+	irq_status0 = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
+	irq_status1 = cam_io_r_mb(base + CCI_IRQ_STATUS_1_ADDR);
+	CAM_DBG(CAM_CCI, "irq0:%x irq1:%x", irq_status0, irq_status1);
+
+	if (irq_status0 & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+		if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_0].reset_pending =
+				FALSE;
+			complete(
+			&cci_dev->cci_master_info[MASTER_0].reset_complete);
+		}
+		if (cci_dev->cci_master_info[MASTER_1].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_1].reset_pending =
+				FALSE;
+			complete(
+			&cci_dev->cci_master_info[MASTER_1].reset_complete);
+		}
+	}
+
+	/* MASTER_0: RD_DONE and RD_THRESHOLD asserted together */
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
+		(irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		rd_done_th_assert = true;
+		complete(&cci_dev->cci_master_info[MASTER_0].th_complete);
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+	}
+	/* MASTER_0: RD_DONE alone (guard keeps this from re-completing) */
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
+		(!rd_done_th_assert)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		rd_done_th_assert = true;
+		if (cci_dev->is_burst_read)
+			complete(
+			&cci_dev->cci_master_info[MASTER_0].th_complete);
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+	}
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) &&
+		(!rd_done_th_assert)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_0].th_complete);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+		spin_lock_irqsave(
+			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_0],
+			flags);
+		atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_0]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
+		}
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_0],
+			flags);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+		spin_lock_irqsave(
+			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_1],
+			flags);
+		atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_1]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
+		}
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_1],
+			flags);
+	}
+	/* Reset the guard before handling MASTER_1 */
+	rd_done_th_assert = false;
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
+		(irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		rd_done_th_assert = true;
+		complete(&cci_dev->cci_master_info[MASTER_1].th_complete);
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+	}
+	/*
+	 * Bugfix: the original cleared rd_done_th_assert again here, which
+	 * defeated the guard below and let the RD_DONE-only branch fire a
+	 * second complete() after the combined branch already had
+	 * (the MASTER_0 path has no such reset between its branches).
+	 */
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
+		(!rd_done_th_assert)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		rd_done_th_assert = true;
+		if (cci_dev->is_burst_read)
+			complete(
+			&cci_dev->cci_master_info[MASTER_1].th_complete);
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+	}
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) &&
+		(!rd_done_th_assert)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_1].th_complete);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+		spin_lock_irqsave(
+			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_0],
+			flags);
+		atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_0]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
+		}
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_0],
+			flags);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+		spin_lock_irqsave(
+			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1],
+			flags);
+		atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_1]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
+		}
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1],
+			flags);
+	}
+	if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_PAUSE)
+		CAM_DBG(CAM_CCI, "RD_PAUSE ON MASTER_0");
+
+	if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_PAUSE)
+		CAM_DBG(CAM_CCI, "RD_PAUSE ON MASTER_1");
+
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+		cam_io_w_mb(CCI_M0_RESET_RMSK,
+			base + CCI_RESET_CMD_ADDR);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
+		cam_io_w_mb(CCI_M1_RESET_RMSK,
+			base + CCI_RESET_CMD_ADDR);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
+		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
+		cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
+			base + CCI_HALT_REQ_ADDR);
+		CAM_DBG(CAM_CCI, "MASTER_0 error 0x%x", irq_status0);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
+		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
+		cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
+			base + CCI_HALT_REQ_ADDR);
+		CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq_status0);
+	}
+
+	cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR);
+
+	/* Mask RD_THRESHOLD while a burst read is draining the FIFO ... */
+	reg_bmsk = CCI_IRQ_MASK_1_RMSK;
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) &&
+		!(irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK)) {
+		reg_bmsk &= ~CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
+		cci_dev->irqs_disabled |=
+			CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+	}
+
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) &&
+		!(irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK)) {
+		reg_bmsk &= ~CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
+		cci_dev->irqs_disabled |=
+			CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+	}
+
+	if (reg_bmsk != CCI_IRQ_MASK_1_RMSK) {
+		cam_io_w_mb(reg_bmsk, base + CCI_IRQ_MASK_1_ADDR);
+		CAM_DBG(CAM_CCI, "Updating the reg mask for irq1: 0x%x",
+			reg_bmsk);
+	} else if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK ||
+		irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+		/* ... and re-enable it once the read has completed */
+		if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) {
+			spin_lock_irqsave(&cci_dev->lock_status, flags);
+			if (cci_dev->irqs_disabled &
+				CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) {
+				irq_update_rd_done |=
+					CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+				cci_dev->irqs_disabled &=
+					~CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+			}
+			spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+		}
+		if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+			spin_lock_irqsave(&cci_dev->lock_status, flags);
+			if (cci_dev->irqs_disabled &
+				CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) {
+				irq_update_rd_done |=
+					CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+				cci_dev->irqs_disabled &=
+					~CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+			}
+			spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+		}
+	}
+
+	if (irq_update_rd_done != 0) {
+		irq_update_rd_done |= cam_io_r_mb(base + CCI_IRQ_MASK_1_ADDR);
+		cam_io_w_mb(irq_update_rd_done, base + CCI_IRQ_MASK_1_ADDR);
+	}
+
+	cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR);
+	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	return IRQ_HANDLED;
+}
+
+/* v4l2 ISR hook: forward the interrupt to the common CCI IRQ handler */
+static int cam_cci_irq_routine(struct v4l2_subdev *sd, u32 status,
+	bool *handled)
+{
+	struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
+	struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
+	irqreturn_t ret;
+
+	ret = cam_cci_irq(soc_info->irq_line->start, cci_dev);
+	*handled = TRUE;
+	return 0;
+}
+
+/* v4l2 core ops: ioctl entry points plus the in-kernel ISR hook */
+static struct v4l2_subdev_core_ops cci_subdev_core_ops = {
+	.ioctl = cam_cci_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_cci_subdev_compat_ioctl,
+#endif
+	.interrupt_service_routine = cam_cci_irq_routine,
+};
+
+static const struct v4l2_subdev_ops cci_subdev_ops = {
+	.core = &cci_subdev_core_ops,
+};
+
+/* Internal ops intentionally empty; fops are filled during probe
+ * (cam_register_subdev_fops) and attached to dev nodes in late init.
+ */
+static const struct v4l2_subdev_internal_ops cci_subdev_intern_ops;
+
+static struct v4l2_file_operations cci_v4l2_subdev_fops;
+
+/* video_usercopy callback: route a device-node ioctl to the subdev.
+ * NOTE(review): @arg is dropped and NULL is forwarded, so device-node
+ * callers can never pass a payload — confirm this is intentional
+ * hardening rather than a lost argument.
+ */
+static long cam_cci_subdev_do_ioctl(
+	struct file *file, unsigned int cmd, void *arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+	return cam_cci_subdev_ioctl(sd, cmd, NULL);
+}
+
+/* Device-node ioctl: let the v4l2 core copy the user payload in/out
+ * around cam_cci_subdev_do_ioctl().
+ */
+static long cam_cci_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	return video_usercopy(file, cmd, arg, cam_cci_subdev_do_ioctl);
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit device-node ioctl.
+ * NOTE(review): forwards NULL instead of a converted @arg — consistent
+ * with the other compat paths in this file, but verify it is deliberate.
+ */
+static long cam_cci_subdev_fops_compat_ioctl(struct file *file,
+	unsigned int cmd, unsigned long arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+	return v4l2_subdev_call(sd, core, ioctl, cmd, NULL);
+}
+#endif
+
+/**
+ * cam_cci_platform_probe - probe a CCI platform device
+ * @pdev: platform device from the DT match
+ *
+ * Allocates the cci_device, parses DT resources, registers the v4l2
+ * subdev and the CPAS client. Returns 0 on success or a negative error.
+ */
+static int cam_cci_platform_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_register_params cpas_parms;
+	struct cci_device *new_cci_dev;
+	struct cam_hw_soc_info *soc_info = NULL;
+	int rc = 0;
+
+	new_cci_dev = kzalloc(sizeof(struct cci_device),
+		GFP_KERNEL);
+	if (!new_cci_dev)
+		return -ENOMEM;
+
+	soc_info = &new_cci_dev->soc_info;
+
+	new_cci_dev->v4l2_dev_str.pdev = pdev;
+
+	soc_info->pdev = pdev;
+	soc_info->dev = &pdev->dev;
+	soc_info->dev_name = pdev->name;
+
+	rc = cam_cci_parse_dt_info(pdev, new_cci_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Resource get Failed: %d", rc);
+		goto cci_no_resource;
+	}
+
+	new_cci_dev->v4l2_dev_str.internal_ops =
+		&cci_subdev_intern_ops;
+	new_cci_dev->v4l2_dev_str.ops =
+		&cci_subdev_ops;
+	strlcpy(new_cci_dev->device_name, CAMX_CCI_DEV_NAME,
+		sizeof(new_cci_dev->device_name));
+	new_cci_dev->v4l2_dev_str.name =
+		new_cci_dev->device_name;
+	new_cci_dev->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	new_cci_dev->v4l2_dev_str.ent_function =
+		CAM_CCI_DEVICE_TYPE;
+	new_cci_dev->v4l2_dev_str.token =
+		new_cci_dev;
+
+	rc = cam_register_subdev(&(new_cci_dev->v4l2_dev_str));
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Fail with cam_register_subdev");
+		goto cci_no_resource;
+	}
+
+	platform_set_drvdata(pdev, &(new_cci_dev->v4l2_dev_str.sd));
+	v4l2_set_subdevdata(&new_cci_dev->v4l2_dev_str.sd, new_cci_dev);
+	if (soc_info->index >= MAX_CCI) {
+		CAM_ERR(CAM_CCI, "Invalid index: %d max supported:%d",
+			soc_info->index, MAX_CCI-1);
+		/* Was missing: rc held 0 from cam_register_subdev, so probe
+		 * reported success while freeing the device below —
+		 * a use-after-free through drvdata.
+		 * TODO(review): the subdev registered above is also leaked
+		 * on this path; an unregister call should be added.
+		 */
+		rc = -EINVAL;
+		goto cci_no_resource;
+	}
+
+	g_cci_subdev[soc_info->index] = &new_cci_dev->v4l2_dev_str.sd;
+	CAM_ERR(CAM_CCI, "Device Type :%d", soc_info->index);
+
+	cam_register_subdev_fops(&cci_v4l2_subdev_fops);
+	cci_v4l2_subdev_fops.unlocked_ioctl = cam_cci_subdev_fops_ioctl;
+#ifdef CONFIG_COMPAT
+	cci_v4l2_subdev_fops.compat_ioctl32 =
+		cam_cci_subdev_fops_compat_ioctl;
+#endif
+
+	cpas_parms.cam_cpas_client_cb = NULL;
+	cpas_parms.cell_index = soc_info->index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = new_cci_dev;
+	strlcpy(cpas_parms.identifier, "cci", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		CAM_ERR(CAM_CCI, "CPAS registration failed");
+		goto cci_no_resource;
+	}
+	CAM_DBG(CAM_CCI, "CPAS registration successful handle=%d",
+		cpas_parms.client_handle);
+	new_cci_dev->cpas_handle = cpas_parms.client_handle;
+
+	return rc;
+cci_no_resource:
+	kfree(new_cci_dev);
+	return rc;
+}
+
+/**
+ * cam_cci_device_remove - tear down a CCI platform device
+ * @pdev: platform device being removed
+ */
+static int cam_cci_device_remove(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+	struct cci_device *cci_dev =
+		v4l2_get_subdevdata(subdev);
+
+	cam_cpas_unregister_client(cci_dev->cpas_handle);
+	cam_cci_soc_remove(pdev, cci_dev);
+	/* Probe allocates with plain kzalloc(), so devm_kfree() was the
+	 * wrong deallocator (cci_dev is not devres-managed).
+	 */
+	kfree(cci_dev);
+	return 0;
+}
+
+/* DT match: binds to nodes with compatible = "qcom,cci" */
+static const struct of_device_id cam_cci_dt_match[] = {
+	{.compatible = "qcom,cci"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_cci_dt_match);
+
+static struct platform_driver cci_driver = {
+	.probe = cam_cci_platform_probe,
+	.remove = cam_cci_device_remove,
+	.driver = {
+		.name = CAMX_CCI_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_cci_dt_match,
+		/* No runtime unbind via sysfs */
+		.suppress_bind_attrs = true,
+	},
+};
+
+/* Point every registered CCI subdev device node at the ioctl-capable
+ * fops prepared during probe. Stops at the first unregistered slot.
+ */
+static int cam_cci_assign_fops(void)
+{
+	int idx;
+
+	for (idx = 0; idx < MAX_CCI; idx++) {
+		struct v4l2_subdev *sd = g_cci_subdev[idx];
+
+		if (!sd)
+			return 0;
+		if (!sd->devnode) {
+			CAM_ERR(CAM_CCI,
+				"Invalid dev node:%pK offset: %d",
+				sd->devnode, idx);
+			return -EINVAL;
+		}
+		sd->devnode->fops = &cci_v4l2_subdev_fops;
+	}
+
+	return 0;
+}
+
+/* late_initcall: by now the subdev device nodes exist, so the custom
+ * fops can be attached to them.
+ */
+static int __init cam_cci_late_init(void)
+{
+	return cam_cci_assign_fops();
+}
+
+/* Module entry: register the CCI platform driver */
+static int __init cam_cci_init_module(void)
+{
+	return platform_driver_register(&cci_driver);
+}
+
+/* Module exit: unregister the CCI platform driver */
+static void __exit cam_cci_exit_module(void)
+{
+	platform_driver_unregister(&cci_driver);
+}
+
+module_init(cam_cci_init_module);
+late_initcall(cam_cci_late_init);
+module_exit(cam_cci_exit_module);
+MODULE_DESCRIPTION("MSM CCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.h
new file mode 100644
index 000000000000..12abeabe0a38
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -0,0 +1,314 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CCI_DEV_H_
+#define _CAM_CCI_DEV_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/cam_sensor.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_io_util.h>
+#include <cam_sensor_util.h>
+#include <cam_subdev.h>
+#include <cam_cpas_api.h>
+#include "cam_cci_hwreg.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+#define V4L2_IDENT_CCI 50005
+#define CCI_I2C_QUEUE_0_SIZE 128
+#define CCI_I2C_QUEUE_1_SIZE 32
+#define CYCLES_PER_MICRO_SEC_DEFAULT 4915
+#define CCI_MAX_DELAY 1000000
+
+#define CCI_TIMEOUT msecs_to_jiffies(1500)
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE 1
+#define FALSE 0
+
+#define CCI_PINCTRL_STATE_DEFAULT "cci_default"
+#define CCI_PINCTRL_STATE_SLEEP "cci_suspend"
+
+#define CCI_NUM_CLK_MAX 16
+#define CCI_NUM_CLK_CASES 5
+#define CCI_CLK_SRC_NAME "cci_src_clk"
+#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_10 10
+#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11 11
+#define BURST_MIN_FREE_SIZE 8
+#define MAX_LRME_V4l2_EVENTS 30
+
+/* Max bytes that can be read per CCI read transaction */
+#define CCI_READ_MAX 256
+#define CCI_I2C_READ_MAX_RETRIES 3
+#define CCI_I2C_MAX_READ 8192
+#define CCI_I2C_MAX_WRITE 8192
+#define CCI_I2C_MAX_BYTE_COUNT 65535
+
+#define CAMX_CCI_DEV_NAME "cam-cci-driver"
+
+#define MAX_CCI 2
+
+#define PRIORITY_QUEUE (QUEUE_0)
+#define SYNC_QUEUE (QUEUE_1)
+
+/* Whether a queued write participates in CSID-synchronised execution */
+enum cci_i2c_sync {
+	MSM_SYNC_DISABLE,
+	MSM_SYNC_ENABLE,
+};
+
+/* Command codes accepted by cam_cci_core_cfg() via VIDIOC_MSM_CCI_CFG */
+enum cam_cci_cmd_type {
+	MSM_CCI_INIT,
+	MSM_CCI_RELEASE,
+	MSM_CCI_SET_SID,
+	MSM_CCI_SET_FREQ,
+	MSM_CCI_SET_SYNC_CID,
+	MSM_CCI_I2C_READ,
+	MSM_CCI_I2C_WRITE,
+	MSM_CCI_I2C_WRITE_SEQ,
+	MSM_CCI_I2C_WRITE_BURST,
+	MSM_CCI_I2C_WRITE_ASYNC,
+	MSM_CCI_GPIO_WRITE,
+	MSM_CCI_I2C_WRITE_SYNC,
+	MSM_CCI_I2C_WRITE_SYNC_BLOCK,
+};
+
+/* Hardware command queues per master (see PRIORITY_QUEUE/SYNC_QUEUE) */
+enum cci_i2c_queue_t {
+	QUEUE_0,
+	QUEUE_1,
+	QUEUE_INVALID,
+};
+
+/* CSID sync parameters (csid < 0 means no valid sync source) */
+struct cam_cci_wait_sync_cfg {
+	uint16_t cid;
+	int16_t csid;
+	uint16_t line;
+	uint16_t delay;
+};
+
+/* Queue assignment for GPIO-triggered sequences */
+struct cam_cci_gpio_cfg {
+	uint16_t gpio_queue;
+	uint16_t i2c_queue;
+};
+
+/* Read request: slave address, destination buffer and sizes */
+struct cam_cci_read_cfg {
+	uint32_t addr;
+	uint16_t addr_type;
+	uint8_t *data;
+	uint16_t num_byte;
+	uint16_t data_type;
+};
+
+/* Static configuration of one HW command queue */
+struct cam_cci_i2c_queue_info {
+	uint32_t max_queue_size;
+	uint32_t report_id;
+	uint32_t irq_en;
+	uint32_t capture_rep_data;
+};
+
+/* Per-master synchronisation state shared between the ioctl paths and
+ * the IRQ handler (cam_cci_irq).
+ */
+struct cam_cci_master_info {
+	uint32_t status;                      /* 0 ok, -EINVAL on HW error */
+	atomic_t q_free[NUM_QUEUES];
+	uint8_t q_lock[NUM_QUEUES];
+	uint8_t reset_pending;                /* TRUE while reset in flight */
+	struct mutex mutex;
+	struct completion reset_complete;
+	struct completion th_complete;        /* read-threshold event */
+	struct mutex mutex_q[NUM_QUEUES];     /* serializes queue users */
+	struct completion report_q[NUM_QUEUES];
+	atomic_t done_pending[NUM_QUEUES];
+	spinlock_t lock_q[NUM_QUEUES];
+};
+
+/* I2C bus timing parameters for one frequency mode */
+struct cam_cci_clk_params_t {
+	uint16_t hw_thigh;
+	uint16_t hw_tlow;
+	uint16_t hw_tsu_sto;
+	uint16_t hw_tsu_sta;
+	uint16_t hw_thd_dat;
+	uint16_t hw_thd_sta;
+	uint16_t hw_tbuf;
+	uint8_t hw_scl_stretch_en;
+	uint8_t hw_trdhld;
+	uint8_t hw_tsp;
+	uint32_t cci_clk_src;
+};
+
+/* Coarse power state checked before issuing transactions */
+enum cam_cci_state_t {
+	CCI_STATE_ENABLED,
+	CCI_STATE_DISABLED,
+};
+
+/**
+ * struct cci_device
+ * @subdev: V4L2 sub device
+ * @soc_info: CCI SOC/hardware resource info (register map, irq, clocks)
+ * @hw_version: Hardware version
+ * @ref_count: Reference Count
+ * @cci_state: CCI state machine
+ * @cci_i2c_queue_info: CCI queue information per master/queue
+ * @cci_master_info: Per-master synchronisation state
+ * @i2c_freq_mode: I2C frequency of operations per master
+ * @cci_clk_params: CCI hw clk params per frequency mode
+ * @cci_pinctrl: Pinctrl structure
+ * @cci_pinctrl_status: CCI pinctrl status
+ * @support_seq_write:
+ *     Set this flag when sequential write is enabled
+ * @write_wq: Work queue for async writes, one per master
+ * @cci_wait_sync_cfg: CCI sync config
+ * @valid_sync: Is it a valid sync with CSID
+ * @v4l2_dev_str: V4L2 device structure
+ * @cycles_per_us: Cycles per micro sec
+ * @clk_level_index: Currently selected clock level
+ * @payload_size: CCI packet payload size
+ * @device_name: Name used for subdev registration
+ * @cpas_handle: CPAS client handle
+ * @irq_status1: Store irq_status1 to be cleared after
+ *     draining FIFO buffer for burst read
+ * @lock_status: to protect changes to irq_status1/irqs_disabled
+ * @is_burst_read: Flag to determine if we are performing
+ *     a burst read operation or not
+ * @irqs_disabled: Mask for IRQs that are disabled
+ */
+struct cci_device {
+	struct v4l2_subdev subdev;
+	struct cam_hw_soc_info soc_info;
+	uint32_t hw_version;
+	uint8_t ref_count;
+	enum cam_cci_state_t cci_state;
+	struct cam_cci_i2c_queue_info
+		cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
+	struct cam_cci_master_info cci_master_info[NUM_MASTERS];
+	enum i2c_freq_mode i2c_freq_mode[NUM_MASTERS];
+	struct cam_cci_clk_params_t cci_clk_params[I2C_MAX_MODES];
+	struct msm_pinctrl_info cci_pinctrl;
+	uint8_t cci_pinctrl_status;
+	uint8_t support_seq_write;
+	struct workqueue_struct *write_wq[MASTER_MAX];
+	struct cam_cci_wait_sync_cfg cci_wait_sync_cfg;
+	uint8_t valid_sync;
+	struct cam_subdev v4l2_dev_str;
+	uint32_t cycles_per_us;
+	int32_t clk_level_index;
+	uint8_t payload_size;
+	char device_name[20];
+	uint32_t cpas_handle;
+	uint32_t irq_status1;
+	spinlock_t lock_status;
+	bool is_burst_read;
+	uint32_t irqs_disabled;
+};
+
+/* Opcodes of the command words loaded into the CCI I2C queues */
+enum cam_cci_i2c_cmd_type {
+	CCI_I2C_SET_PARAM_CMD = 1,
+	CCI_I2C_WAIT_CMD,
+	CCI_I2C_WAIT_SYNC_CMD,
+	CCI_I2C_WAIT_GPIO_EVENT_CMD,
+	CCI_I2C_TRIG_I2C_EVENT_CMD,
+	CCI_I2C_LOCK_CMD,
+	CCI_I2C_UNLOCK_CMD,
+	CCI_I2C_REPORT_CMD,
+	CCI_I2C_WRITE_CMD,
+	CCI_I2C_READ_CMD,
+	CCI_I2C_WRITE_DISABLE_P_CMD,
+	CCI_I2C_READ_DISABLE_P_CMD,
+	CCI_I2C_WRITE_CMD2,
+	CCI_I2C_WRITE_CMD3,
+	CCI_I2C_REPEAT_CMD,
+	CCI_I2C_INVALID_CMD,
+};
+
+/* Opcodes of the command words loaded into the CCI GPIO queues */
+enum cam_cci_gpio_cmd_type {
+	CCI_GPIO_SET_PARAM_CMD = 1,
+	CCI_GPIO_WAIT_CMD,
+	CCI_GPIO_WAIT_SYNC_CMD,
+	CCI_GPIO_WAIT_GPIO_IN_EVENT_CMD,
+	CCI_GPIO_WAIT_I2C_Q_TRIG_EVENT_CMD,
+	CCI_GPIO_OUT_CMD,
+	CCI_GPIO_TRIG_EVENT_CMD,
+	CCI_GPIO_REPORT_CMD,
+	CCI_GPIO_REPEAT_CMD,
+	CCI_GPIO_CONTINUE_CMD,
+	CCI_GPIO_INVALID_CMD,
+};
+
+/* Identifies a sensor on the CCI bus: which master, slave id, timing */
+struct cam_sensor_cci_client {
+	struct v4l2_subdev *cci_subdev;
+	uint32_t freq;
+	enum i2c_freq_mode i2c_freq_mode;
+	enum cci_i2c_master_t cci_i2c_master;
+	uint16_t sid;
+	uint16_t cid;
+	uint32_t timeout;
+	uint16_t retries;
+	uint16_t id_map;
+	uint16_t cci_device;
+};
+
+/* Command block passed to cam_cci_core_cfg(); cfg member selected by cmd,
+ * status mirrors the handler's return code.
+ */
+struct cam_cci_ctrl {
+	int32_t status;
+	struct cam_sensor_cci_client *cci_info;
+	enum cam_cci_cmd_type cmd;
+	union {
+		struct cam_sensor_i2c_reg_setting cci_i2c_write_cfg;
+		struct cam_cci_read_cfg cci_i2c_read_cfg;
+		struct cam_cci_wait_sync_cfg cci_wait_sync_cfg;
+		struct cam_cci_gpio_cfg gpio_cfg;
+	} cfg;
+};
+
+/* Work item carrying a deep-copied write request to the workqueue;
+ * freed by the worker after completion.
+ */
+struct cci_write_async {
+	struct cci_device *cci_dev;
+	struct cam_cci_ctrl c_ctrl;
+	enum cci_i2c_queue_t queue;
+	struct work_struct work;
+	enum cci_i2c_sync sync_en;
+};
+
+/* Shared CCI interrupt handler (see cam_cci_dev.c) */
+irqreturn_t cam_cci_irq(int irq_num, void *data);
+
+/* Stubbed out when the camera stack is not built in */
+#ifdef CONFIG_SPECTRA_CAMERA
+extern struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index);
+#else
+static inline struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index)
+{
+	return NULL;
+}
+#endif
+
+#define VIDIOC_MSM_CCI_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct cam_cci_ctrl *)
+
+#endif /* _CAM_CCI_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_hwreg.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_hwreg.h
new file mode 100644
index 000000000000..027a0501dcae
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_hwreg.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CCI_HWREG_
+#define _CAM_CCI_HWREG_
+
+#define CCI_HW_VERSION_ADDR 0x00000000
+#define CCI_RESET_CMD_ADDR 0x00000004
+#define CCI_RESET_CMD_RMSK 0x0f73f3f7
+#define CCI_M0_RESET_RMSK 0x3F1
+#define CCI_M1_RESET_RMSK 0x3F001
+#define CCI_QUEUE_START_ADDR 0x00000008
+#define CCI_SET_CID_SYNC_TIMER_ADDR 0x00000010
+#define CCI_SET_CID_SYNC_TIMER_OFFSET 0x00000004
+#define CCI_I2C_M0_SCL_CTL_ADDR 0x00000100
+#define CCI_I2C_M0_SDA_CTL_0_ADDR 0x00000104
+#define CCI_I2C_M0_SDA_CTL_1_ADDR 0x00000108
+#define CCI_I2C_M0_SDA_CTL_2_ADDR 0x0000010c
+#define CCI_I2C_M0_READ_DATA_ADDR 0x00000118
+#define CCI_I2C_M0_MISC_CTL_ADDR 0x00000110
+#define CCI_I2C_M0_READ_BUF_LEVEL_ADDR 0x0000011C
+#define CCI_HALT_REQ_ADDR 0x00000034
+#define CCI_M0_HALT_REQ_RMSK 0x1
+#define CCI_M1_HALT_REQ_RMSK 0x2
+#define CCI_I2C_M1_SCL_CTL_ADDR 0x00000200
+#define CCI_I2C_M1_SDA_CTL_0_ADDR 0x00000204
+#define CCI_I2C_M1_SDA_CTL_1_ADDR 0x00000208
+#define CCI_I2C_M1_SDA_CTL_2_ADDR 0x0000020c
+#define CCI_I2C_M1_MISC_CTL_ADDR 0x00000210
+#define CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR 0x00000304
+#define CCI_I2C_M0_Q0_CUR_CMD_ADDR 0x00000308
+#define CCI_I2C_M0_Q0_REPORT_STATUS_ADDR 0x0000030c
+#define CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR 0x00000300
+#define CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x00000310
+#define CCI_IRQ_MASK_0_ADDR 0x00000c04
+#define CCI_IRQ_MASK_0_RMSK 0x7fff7ff7
+#define CCI_IRQ_MASK_1_ADDR 0x00000c10
+#define CCI_IRQ_MASK_1_RMSK 0x00110000
+#define CCI_IRQ_CLEAR_0_ADDR 0x00000c08
+#define CCI_IRQ_CLEAR_1_ADDR 0x00000c14
+#define CCI_IRQ_STATUS_0_ADDR 0x00000c0c
+#define CCI_IRQ_STATUS_1_ADDR 0x00000c18
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK 0x4000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK 0x2000000
+#define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK 0x1000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK 0x100000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK 0x10000
+#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK 0x1000
+#define CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD 0x100000
+#define CCI_IRQ_STATUS_1_I2C_M1_RD_PAUSE 0x200000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK 0x100
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK 0x10
+#define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK 0x18000EE6
+#define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK 0x60EE6000
+#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK 0x1
+#define CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD 0x10000
+#define CCI_IRQ_STATUS_1_I2C_M0_RD_PAUSE 0x20000
+#define CCI_I2C_M0_RD_THRESHOLD_ADDR 0x00000120
+#define CCI_I2C_M1_RD_THRESHOLD_ADDR 0x00000220
+#define CCI_I2C_RD_THRESHOLD_VALUE 0x30
+#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x00000c00
+
+#define DEBUG_TOP_REG_START 0x0
+#define DEBUG_TOP_REG_COUNT 14
+#define DEBUG_MASTER_REG_START 0x100
+#define DEBUG_MASTER_REG_COUNT 9
+#define DEBUG_MASTER_QUEUE_REG_START 0x300
+#define DEBUG_MASTER_QUEUE_REG_COUNT 7
+#define DEBUG_INTR_REG_START 0xC00
+#define DEBUG_INTR_REG_COUNT 7
+#endif /* _CAM_CCI_HWREG_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.c
new file mode 100644
index 000000000000..da714af3fde2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -0,0 +1,408 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_cci_dev.h"
+#include "cam_cci_core.h"
+
+/*
+ * cam_cci_init() - power up and initialize the CCI controller.
+ * @sd: CCI v4l2 sub device
+ * @c_ctrl: control request that triggered the init (selects the master)
+ *
+ * On the first reference (ref_count 0 -> 1) this votes CPAS AHB/AXI
+ * bandwidth, enables platform resources, performs a full controller reset,
+ * programs IRQ masks and the M0/M1 read-FIFO thresholds, and marks the
+ * device CCI_STATE_ENABLED.  On subsequent references it only flushes and
+ * resets the master named in @c_ctrl and returns 0.
+ *
+ * Return: 0 on success, negative error code on failure.  Note the register
+ * write sequence below is hardware-mandated; do not reorder.
+ */
+int cam_cci_init(struct v4l2_subdev *sd,
+ struct cam_cci_ctrl *c_ctrl)
+{
+ uint8_t i = 0, j = 0;
+ int32_t rc = 0;
+ struct cci_device *cci_dev;
+ enum cci_i2c_master_t master = MASTER_0;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+ struct cam_hw_soc_info *soc_info = NULL;
+ void __iomem *base = NULL;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev || !c_ctrl) {
+ CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+ cci_dev, c_ctrl);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ soc_info = &cci_dev->soc_info;
+ base = soc_info->reg_map[0].mem_base;
+
+ /*
+ * NOTE(review): soc_info can never be NULL here (it is the address of an
+ * embedded member); only the base check is meaningful.
+ */
+ if (!soc_info || !base) {
+ CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+ soc_info, base);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ CAM_DBG(CAM_CCI, "Base address %pK", base);
+
+ /* Already initialized: just reset the requested master and return. */
+ if (cci_dev->ref_count++) {
+ CAM_DBG(CAM_CCI, "ref_count %d", cci_dev->ref_count);
+ master = c_ctrl->cci_info->cci_i2c_master;
+ CAM_DBG(CAM_CCI, "master %d", master);
+ if (master < MASTER_MAX && master >= 0) {
+ mutex_lock(&cci_dev->cci_master_info[master].mutex);
+ flush_workqueue(cci_dev->write_wq[master]);
+ /* Re-initialize the completion */
+ reinit_completion(
+ &cci_dev->cci_master_info[master].reset_complete);
+ for (i = 0; i < NUM_QUEUES; i++)
+ reinit_completion(
+ &cci_dev->cci_master_info[master].report_q[i]);
+ /* Set reset pending flag to TRUE */
+ cci_dev->cci_master_info[master].reset_pending = TRUE;
+ /* Set proper mask to RESET CMD address */
+ if (master == MASTER_0)
+ cam_io_w_mb(CCI_M0_RESET_RMSK,
+ base + CCI_RESET_CMD_ADDR);
+ else
+ cam_io_w_mb(CCI_M1_RESET_RMSK,
+ base + CCI_RESET_CMD_ADDR);
+ /* wait for reset done irq */
+ /*
+ * wait_for_completion_timeout() returns 0 on timeout; the
+ * error is logged but intentionally not propagated here.
+ */
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].reset_complete,
+ CCI_TIMEOUT);
+ if (rc <= 0)
+ CAM_ERR(CAM_CCI, "wait failed %d", rc);
+ mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+ }
+ return 0;
+ }
+
+ /* First user: vote bandwidth before touching the hardware. */
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+ rc = cam_cpas_start(cci_dev->cpas_handle,
+ &ahb_vote, &axi_vote);
+ if (rc != 0)
+ CAM_ERR(CAM_CCI, "CPAS start failed");
+
+ cam_cci_get_clk_rates(cci_dev, c_ctrl);
+
+ /* Re-initialize the completion */
+ reinit_completion(&cci_dev->cci_master_info[master].reset_complete);
+ for (i = 0; i < NUM_QUEUES; i++)
+ reinit_completion(
+ &cci_dev->cci_master_info[master].report_q[i]);
+
+ /* Enable Regulators and IRQ*/
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_LOWSVS_VOTE, true);
+ if (rc < 0) {
+ CAM_DBG(CAM_CCI, "request platform resources failed");
+ goto platform_enable_failed;
+ }
+
+ cci_dev->hw_version = cam_io_r_mb(base +
+ CCI_HW_VERSION_ADDR);
+ CAM_DBG(CAM_CCI, "hw_version = 0x%x", cci_dev->hw_version);
+
+ cci_dev->payload_size =
+ MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11;
+ cci_dev->support_seq_write = 1;
+
+ /* Queue 0 and queue 1 of each master have different depths. */
+ for (i = 0; i < NUM_MASTERS; i++) {
+ for (j = 0; j < NUM_QUEUES; j++) {
+ if (j == QUEUE_0)
+ cci_dev->cci_i2c_queue_info[i][j].max_queue_size
+ = CCI_I2C_QUEUE_0_SIZE;
+ else
+ cci_dev->cci_i2c_queue_info[i][j].max_queue_size
+ = CCI_I2C_QUEUE_1_SIZE;
+
+ CAM_DBG(CAM_CCI, "CCI Master[%d] :: Q0 : %d Q1 : %d", i,
+ cci_dev->cci_i2c_queue_info[i][j].max_queue_size,
+ cci_dev->cci_i2c_queue_info[i][j].max_queue_size);
+ }
+ }
+
+ /* Full-controller reset; completion is signalled from the reset IRQ. */
+ cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+ cam_io_w_mb(CCI_RESET_CMD_RMSK, base +
+ CCI_RESET_CMD_ADDR);
+ cam_io_w_mb(0x1, base + CCI_RESET_CMD_ADDR);
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[MASTER_0].reset_complete,
+ CCI_TIMEOUT);
+ if (rc <= 0) {
+ CAM_ERR(CAM_CCI, "wait_for_completion_timeout");
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ goto reset_complete_failed;
+ }
+ for (i = 0; i < MASTER_MAX; i++)
+ cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+ /* Unmask then clear both IRQ banks, then issue a global clear. */
+ cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
+ base + CCI_IRQ_MASK_0_ADDR);
+ cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
+ base + CCI_IRQ_CLEAR_0_ADDR);
+ cam_io_w_mb(CCI_IRQ_MASK_1_RMSK,
+ base + CCI_IRQ_MASK_1_ADDR);
+ cam_io_w_mb(CCI_IRQ_MASK_1_RMSK,
+ base + CCI_IRQ_CLEAR_1_ADDR);
+ cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ for (i = 0; i < MASTER_MAX; i++) {
+ if (!cci_dev->write_wq[i]) {
+ CAM_ERR(CAM_CCI, "Failed to flush write wq");
+ rc = -ENOMEM;
+ goto reset_complete_failed;
+ } else {
+ flush_workqueue(cci_dev->write_wq[i]);
+ }
+ }
+
+ /* Set RD FIFO threshold for M0 & M1 */
+ cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
+ base + CCI_I2C_M0_RD_THRESHOLD_ADDR);
+ cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
+ base + CCI_I2C_M1_RD_THRESHOLD_ADDR);
+
+ cci_dev->cci_state = CCI_STATE_ENABLED;
+
+ return 0;
+
+reset_complete_failed:
+ cam_soc_util_disable_platform_resource(soc_info, 1, 1);
+
+platform_enable_failed:
+ cci_dev->ref_count--;
+ cam_cpas_stop(cci_dev->cpas_handle);
+
+ return rc;
+}
+
+/*
+ * cam_cci_soc_remove() - release the platform resources held by @cci_dev.
+ * @pdev is accepted for interface symmetry but is unused here.
+ */
+void cam_cci_soc_remove(struct platform_device *pdev,
+ struct cci_device *cci_dev)
+{
+ struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
+
+ cam_soc_util_release_platform_resource(soc_info);
+}
+
+/*
+ * cam_cci_init_cci_params() - initialize per-master and per-queue software
+ * state: status, mutexes, completions and spinlocks.  Must run once before
+ * the IRQ handler or any queue operation can touch these objects.
+ */
+static void cam_cci_init_cci_params(struct cci_device *new_cci_dev)
+{
+ uint8_t i = 0, j = 0;
+
+ for (i = 0; i < NUM_MASTERS; i++) {
+ new_cci_dev->cci_master_info[i].status = 0;
+ mutex_init(&new_cci_dev->cci_master_info[i].mutex);
+ init_completion(
+ &new_cci_dev->cci_master_info[i].reset_complete);
+ init_completion(
+ &new_cci_dev->cci_master_info[i].th_complete);
+
+ for (j = 0; j < NUM_QUEUES; j++) {
+ mutex_init(&new_cci_dev->cci_master_info[i].mutex_q[j]);
+ init_completion(
+ &new_cci_dev->cci_master_info[i].report_q[j]);
+ spin_lock_init(
+ &new_cci_dev->cci_master_info[i].lock_q[j]);
+ }
+ }
+ spin_lock_init(&new_cci_dev->lock_status);
+}
+
+/*
+ * cam_cci_init_default_clk_params() - load conservative 100 kHz I2C timing
+ * parameters for the speed-mode slot @index.  Used as the fallback when the
+ * device tree does not fully describe a mode (see cam_cci_init_clk_params()).
+ * Values are hardware timing counts against a 37.5 MHz source clock.
+ */
+static void cam_cci_init_default_clk_params(struct cci_device *cci_dev,
+ uint8_t index)
+{
+ /* default clock params are for 100Khz */
+ cci_dev->cci_clk_params[index].hw_thigh = 201;
+ cci_dev->cci_clk_params[index].hw_tlow = 174;
+ cci_dev->cci_clk_params[index].hw_tsu_sto = 204;
+ cci_dev->cci_clk_params[index].hw_tsu_sta = 231;
+ cci_dev->cci_clk_params[index].hw_thd_dat = 22;
+ cci_dev->cci_clk_params[index].hw_thd_sta = 162;
+ cci_dev->cci_clk_params[index].hw_tbuf = 227;
+ cci_dev->cci_clk_params[index].hw_scl_stretch_en = 0;
+ cci_dev->cci_clk_params[index].hw_trdhld = 6;
+ cci_dev->cci_clk_params[index].hw_tsp = 3;
+ cci_dev->cci_clk_params[index].cci_clk_src = 37500000;
+}
+
+/*
+ * cam_cci_init_clk_params() - read the I2C timing parameters for every speed
+ * mode from the device tree.
+ *
+ * The reads are chained through a single rc: the first property that fails
+ * to parse short-circuits all the remaining `if (!rc)` stages and the final
+ * else branch loads the 100 kHz defaults for that mode — so a partially
+ * specified mode is REPLACED WHOLESALE by defaults, discarding properties
+ * that were already read.  cci-clk-src alone is optional (its rc is ignored
+ * and val is pre-zeroed).
+ */
+static void cam_cci_init_clk_params(struct cci_device *cci_dev)
+{
+ int32_t rc = 0;
+ uint32_t val = 0;
+ uint8_t count = 0;
+ struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
+ struct device_node *src_node = NULL;
+
+ for (count = 0; count < I2C_MAX_MODES; count++) {
+
+ if (count == I2C_STANDARD_MODE)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_standard_mode");
+ else if (count == I2C_FAST_MODE)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_fast_mode");
+ else if (count == I2C_FAST_PLUS_MODE)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_fast_plus_mode");
+ else
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_custom_mode");
+
+ rc = of_property_read_u32(src_node, "hw-thigh", &val);
+ CAM_DBG(CAM_CCI, "hw-thigh %d, rc %d", val, rc);
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_thigh = val;
+ rc = of_property_read_u32(src_node, "hw-tlow",
+ &val);
+ CAM_DBG(CAM_CCI, "hw-tlow %d, rc %d",
+ val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tlow = val;
+ rc = of_property_read_u32(src_node, "hw-tsu-sto",
+ &val);
+ CAM_DBG(CAM_CCI, "hw-tsu-sto %d, rc %d",
+ val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tsu_sto = val;
+ rc = of_property_read_u32(src_node, "hw-tsu-sta",
+ &val);
+ CAM_DBG(CAM_CCI, "hw-tsu-sta %d, rc %d",
+ val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tsu_sta = val;
+ rc = of_property_read_u32(src_node, "hw-thd-dat",
+ &val);
+ CAM_DBG(CAM_CCI, "hw-thd-dat %d, rc %d",
+ val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_thd_dat = val;
+ rc = of_property_read_u32(src_node, "hw-thd-sta",
+ &val);
+ CAM_DBG(CAM_CCI, "hw-thd-sta %d, rc %d",
+ val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_thd_sta = val;
+ rc = of_property_read_u32(src_node, "hw-tbuf",
+ &val);
+ CAM_DBG(CAM_CCI, "hw-tbuf %d, rc %d",
+ val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tbuf = val;
+ rc = of_property_read_u32(src_node,
+ "hw-scl-stretch-en", &val);
+ CAM_DBG(CAM_CCI, "hw-scl-stretch-en %d, rc %d",
+ val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
+ rc = of_property_read_u32(src_node, "hw-trdhld",
+ &val);
+ CAM_DBG(CAM_CCI, "hw-trdhld %d, rc %d",
+ val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_trdhld = val;
+ rc = of_property_read_u32(src_node, "hw-tsp",
+ &val);
+ CAM_DBG(CAM_CCI, "hw-tsp %d, rc %d", val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tsp = val;
+ val = 0;
+ rc = of_property_read_u32(src_node, "cci-clk-src",
+ &val);
+ CAM_DBG(CAM_CCI, "cci-clk-src %d, rc %d", val, rc);
+ cci_dev->cci_clk_params[count].cci_clk_src = val;
+ } else
+ cam_cci_init_default_clk_params(cci_dev, count);
+
+ /* Drop the reference taken by of_find_node_by_name(). */
+ of_node_put(src_node);
+ }
+}
+
+/*
+ * cam_cci_parse_dt_info() - parse the CCI device-tree node and set up
+ * software state: DT properties, platform resources (IRQ registered with
+ * cam_cci_irq), locks/completions, clock timing params, child nodes and the
+ * per-master write workqueues.
+ *
+ * Return: 0 on success, -EINVAL on DT/resource failure.  Failures from
+ * of_platform_populate() and workqueue creation are logged but do not fail
+ * the probe; cam_cci_init() re-checks write_wq[] before use.
+ */
+int cam_cci_parse_dt_info(struct platform_device *pdev,
+ struct cci_device *new_cci_dev)
+{
+ int rc = 0, i = 0;
+ struct cam_hw_soc_info *soc_info =
+ &new_cci_dev->soc_info;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_CCI, "Parsing DT data failed:%d", rc);
+ return -EINVAL;
+ }
+
+ new_cci_dev->ref_count = 0;
+
+ rc = cam_soc_util_request_platform_resource(soc_info,
+ cam_cci_irq, new_cci_dev);
+ if (rc < 0) {
+ CAM_ERR(CAM_CCI, "requesting platform resources failed:%d", rc);
+ return -EINVAL;
+ }
+ new_cci_dev->v4l2_dev_str.pdev = pdev;
+ cam_cci_init_cci_params(new_cci_dev);
+ cam_cci_init_clk_params(new_cci_dev);
+
+ rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (rc)
+ CAM_ERR(CAM_CCI, "failed to add child nodes, rc=%d", rc);
+
+ for (i = 0; i < MASTER_MAX; i++) {
+ new_cci_dev->write_wq[i] = create_singlethread_workqueue(
+ "cam_cci_wq");
+ if (!new_cci_dev->write_wq[i])
+ CAM_ERR(CAM_CCI, "Failed to create write wq");
+ }
+ CAM_DBG(CAM_CCI, "Exit");
+ return 0;
+}
+
+/*
+ * cam_cci_soc_release() - drop a reference on the CCI device and, on the
+ * last reference, flush the write workqueues, reset the cached frequency
+ * modes, disable platform resources and stop CPAS.
+ *
+ * Return: 0 on success or when references remain, negative error code on
+ * failure.
+ */
+int cam_cci_soc_release(struct cci_device *cci_dev)
+{
+ uint8_t i = 0;
+ /*
+ * Fix: rc was declared uint8_t, truncating negative error codes from
+ * cam_soc_util_disable_platform_resource() to a small positive value,
+ * so callers testing `rc < 0` would miss the failure.
+ */
+ int32_t rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+
+ if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
+ CAM_ERR(CAM_CCI, "invalid ref count %d / cci state %d",
+ cci_dev->ref_count, cci_dev->cci_state);
+ return -EINVAL;
+ }
+ if (--cci_dev->ref_count) {
+ CAM_DBG(CAM_CCI, "ref_count Exit %d", cci_dev->ref_count);
+ return 0;
+ }
+ for (i = 0; i < MASTER_MAX; i++)
+ if (cci_dev->write_wq[i])
+ flush_workqueue(cci_dev->write_wq[i]);
+
+ for (i = 0; i < MASTER_MAX; i++)
+ cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc) {
+ CAM_ERR(CAM_CCI, "platform resources disable failed, rc=%d",
+ rc);
+ return rc;
+ }
+
+ cci_dev->cci_state = CCI_STATE_DISABLED;
+ cci_dev->cycles_per_us = 0;
+
+ cam_cpas_stop(cci_dev->cpas_handle);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.h
new file mode 100644
index 000000000000..4cefc22ca8c5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CCI_SOC_H_
+#define _CAM_CCI_SOC_H_
+
+#include "cam_cci_core.h"
+#include "cam_soc_util.h"
+
+/**
+ * @sd: V4L2 sub device
+ * @c_ctrl: CCI control structure
+ *
+ * This API initializes the CCI and acquires SOC resources
+ */
+int cam_cci_init(struct v4l2_subdev *sd,
+ struct cam_cci_ctrl *c_ctrl);
+
+/**
+ * @cci_dev: CCI device structure
+ *
+ * This API releases the CCI and its SOC resources
+ */
+int cam_cci_soc_release(struct cci_device *cci_dev);
+
+/**
+ * @pdev: Platform device
+ * @new_cci_dev: CCI device structure
+ *
+ * This API parses CCI device tree
+ */
+int cam_cci_parse_dt_info(struct platform_device *pdev,
+ struct cci_device *new_cci_dev);
+
+/**
+ * @pdev: Platform device
+ * @cci_dev: CCI device structure
+ *
+ * This API puts all SOC resources
+ */
+void cam_cci_soc_remove(struct platform_device *pdev,
+ struct cci_device *cci_dev);
+#endif /* _CAM_CCI_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/Makefile
new file mode 100644
index 000000000000..ded58fefec16
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy_soc.o cam_csiphy_dev.o cam_csiphy_core.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
new file mode 100644
index 000000000000..9894b217ac2c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -0,0 +1,876 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_csiphy_core.h"
+#include "cam_csiphy_dev.h"
+#include "cam_csiphy_soc.h"
+#include "cam_common_util.h"
+
+#include <soc/qcom/scm.h>
+#include <cam_mem_mgr.h>
+
+#define SCM_SVC_CAMERASS 0x18
+#define SECURE_SYSCALL_ID 0x6
+#define SECURE_SYSCALL_ID_2 0x7
+
+#define LANE_MASK_2PH 0x1F
+#define LANE_MASK_3PH 0x7
+
+static int csiphy_dump;
+module_param(csiphy_dump, int, 0644);
+
+/*
+ * cam_csiphy_notify_secure_mode() - tell the hypervisor (via SCM call
+ * SECURE_SYSCALL_ID_2) to protect or unprotect the CSIPHY lanes described
+ * by the cached cp_reg_mask for instance @offset.
+ *
+ * @protect: true to enter secure mode, false to leave it
+ * @offset: CSIPHY instance index; must be < CSIPHY_MAX_INSTANCES
+ *
+ * Return: 0 on success, -EINVAL on bad offset or SCM failure.
+ */
+static int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
+ bool protect, int32_t offset)
+{
+ struct scm_desc desc = {0};
+
+ if (offset >= CSIPHY_MAX_INSTANCES) {
+ CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
+ return -EINVAL;
+ }
+
+ desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+ desc.args[0] = protect;
+ desc.args[1] = csiphy_dev->csiphy_cpas_cp_reg_mask[offset];
+
+ if (scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS, SECURE_SYSCALL_ID_2),
+ &desc)) {
+ CAM_ERR(CAM_CSIPHY, "scm call to hypervisor failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * cam_csiphy_get_instance_offset() - map a device handle to its acquire
+ * slot index.
+ *
+ * Return: the matching index, or -EINVAL when acquire_count is out of
+ * range.  NOTE(review): when @dev_handle is not found the loop falls
+ * through and returns acquire_count itself — callers must range-check the
+ * result (cam_csiphy_update_secure_info() does).  Also note the bound uses
+ * `>` so acquire_count == CSIPHY_MAX_INSTANCES passes; confirm that is
+ * intended.
+ */
+int32_t cam_csiphy_get_instance_offset(
+ struct csiphy_device *csiphy_dev,
+ int32_t dev_handle)
+{
+ int32_t i;
+
+ if (csiphy_dev->acquire_count >
+ CSIPHY_MAX_INSTANCES) {
+ CAM_ERR(CAM_CSIPHY, "Invalid acquire count");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < csiphy_dev->acquire_count; i++) {
+ if (dev_handle ==
+ csiphy_dev->bridge_intf.device_hdl[i])
+ break;
+ }
+
+ return i;
+}
+
+/*
+ * cam_csiphy_query_cap() - report this PHY's slot index, hardware version
+ * and clock lane to userspace via @csiphy_cap.
+ */
+void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
+ struct cam_csiphy_query_cap *csiphy_cap)
+{
+ struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info;
+
+ csiphy_cap->slot_info = soc_info->index;
+ csiphy_cap->version = csiphy_dev->hw_version;
+ csiphy_cap->clk_lane = csiphy_dev->clk_lane;
+}
+
+/*
+ * cam_csiphy_reset() - play back the chip-specific reset register sequence
+ * (csiphy_reset_reg table), honouring each entry's post-write delay.
+ * Delays are stored in ms and converted to the usleep_range() us window.
+ */
+void cam_csiphy_reset(struct csiphy_device *csiphy_dev)
+{
+ int32_t i;
+ void __iomem *base = NULL;
+ uint32_t size =
+ csiphy_dev->ctrl_reg->csiphy_reg.csiphy_reset_array_size;
+ struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info;
+
+ base = soc_info->reg_map[0].mem_base;
+
+ for (i = 0; i < size; i++) {
+ cam_io_w_mb(
+ csiphy_dev->ctrl_reg->csiphy_reset_reg[i].reg_data,
+ base +
+ csiphy_dev->ctrl_reg->csiphy_reset_reg[i].reg_addr);
+
+ usleep_range(csiphy_dev->ctrl_reg->
+ csiphy_reset_reg[i].delay * 1000,
+ csiphy_dev->ctrl_reg->csiphy_reset_reg[i].delay * 1000
+ + 10);
+ }
+}
+
+/*
+ * cam_csiphy_update_secure_info() - record that instance @cfg_dev runs in
+ * secure mode and compute the lane-protection bitmask later handed to the
+ * hypervisor (see cam_csiphy_notify_secure_mode()).
+ *
+ * For 2-phase the clock lane bit is removed from the mask and the data-lane
+ * bits above it are shifted down by one so the mask is contiguous; for
+ * 3-phase the raw 3-bit lane mask is used.  The final mask is positioned by
+ * PHY index and by whether the PHY is in DPHY or CPHY mode.
+ *
+ * Return: 0 on success, -EINVAL on invalid acquire count or offset.
+ */
+int32_t cam_csiphy_update_secure_info(
+ struct csiphy_device *csiphy_dev,
+ struct cam_csiphy_info *cam_cmd_csiphy_info,
+ struct cam_config_dev_cmd *cfg_dev)
+{
+ uint32_t clock_lane, adj_lane_mask, temp;
+ int32_t offset;
+
+ if (csiphy_dev->acquire_count >=
+ CSIPHY_MAX_INSTANCES) {
+ CAM_ERR(CAM_CSIPHY, "Invalid acquire count");
+ return -EINVAL;
+ }
+
+ offset = cam_csiphy_get_instance_offset(csiphy_dev,
+ cfg_dev->dev_handle);
+ if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+ CAM_ERR(CAM_CSIPHY, "Invalid offset");
+ return -EINVAL;
+ }
+
+ if (cam_cmd_csiphy_info->combo_mode)
+ clock_lane =
+ csiphy_dev->ctrl_reg->csiphy_reg.csiphy_2ph_combo_ck_ln;
+ else
+ clock_lane =
+ csiphy_dev->ctrl_reg->csiphy_reg.csiphy_2ph_clock_lane;
+
+ /* Drop the clock-lane bit, then close the gap it leaves in the mask. */
+ adj_lane_mask = cam_cmd_csiphy_info->lane_mask & LANE_MASK_2PH &
+ ~clock_lane;
+ temp = adj_lane_mask & (clock_lane - 1);
+ adj_lane_mask =
+ ((adj_lane_mask & (~(clock_lane - 1))) >> 1) | temp;
+
+ if (cam_cmd_csiphy_info->csiphy_3phase)
+ adj_lane_mask = cam_cmd_csiphy_info->lane_mask & LANE_MASK_3PH;
+
+ csiphy_dev->csiphy_info.secure_mode[offset] = 1;
+
+ csiphy_dev->csiphy_cpas_cp_reg_mask[offset] =
+ adj_lane_mask << (csiphy_dev->soc_info.index *
+ (CAM_CSIPHY_MAX_DPHY_LANES + CAM_CSIPHY_MAX_CPHY_LANES) +
+ (!cam_cmd_csiphy_info->csiphy_3phase) *
+ (CAM_CSIPHY_MAX_CPHY_LANES));
+
+ return 0;
+}
+
+/*
+ * cam_cmd_buf_parser() - extract a cam_csiphy_info payload from a userspace
+ * CSL packet and accumulate it into csiphy_dev->csiphy_info (lane count and
+ * mask are combined across combo-mode sensors).
+ *
+ * NOTE(review): the bounds check only verifies offset > len for the packet
+ * buffer; it does not account for sizeof(struct cam_packet), cmd_buf_offset,
+ * or the cmd-buffer len vs cmd_desc->offset — verify against upstream
+ * hardening of this parser before trusting untrusted handles.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev,
+ struct cam_config_dev_cmd *cfg_dev)
+{
+ int32_t rc = 0;
+ uintptr_t generic_ptr;
+ struct cam_packet *csl_packet = NULL;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ uint32_t *cmd_buf = NULL;
+ struct cam_csiphy_info *cam_cmd_csiphy_info = NULL;
+ size_t len;
+
+ if (!cfg_dev || !csiphy_dev) {
+ CAM_ERR(CAM_CSIPHY, "Invalid Args");
+ return -EINVAL;
+ }
+
+ rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle,
+ &generic_ptr, &len);
+ if (rc < 0) {
+ CAM_ERR(CAM_CSIPHY, "Failed to get packet Mem address: %d", rc);
+ return rc;
+ }
+
+ if (cfg_dev->offset > len) {
+ CAM_ERR(CAM_CSIPHY,
+ "offset is out of bounds: offset: %lld len: %zu",
+ cfg_dev->offset, len);
+ return -EINVAL;
+ }
+
+ csl_packet = (struct cam_packet *)
+ (generic_ptr + (uint32_t)cfg_dev->offset);
+
+ /* First (and only) command buffer descriptor in the packet payload. */
+ cmd_desc = (struct cam_cmd_buf_desc *)
+ ((uint32_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset / 4);
+
+ rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+ &generic_ptr, &len);
+ if (rc < 0) {
+ CAM_ERR(CAM_CSIPHY,
+ "Failed to get cmd buf Mem address : %d", rc);
+ return rc;
+ }
+
+ cmd_buf = (uint32_t *)generic_ptr;
+ cmd_buf += cmd_desc->offset / 4;
+ cam_cmd_csiphy_info = (struct cam_csiphy_info *)cmd_buf;
+
+ /* Accumulate: combo mode configures the same PHY more than once. */
+ csiphy_dev->config_count++;
+ csiphy_dev->csiphy_info.lane_cnt += cam_cmd_csiphy_info->lane_cnt;
+ csiphy_dev->csiphy_info.lane_mask |= cam_cmd_csiphy_info->lane_mask;
+ csiphy_dev->csiphy_info.csiphy_3phase =
+ cam_cmd_csiphy_info->csiphy_3phase;
+ csiphy_dev->csiphy_info.combo_mode |= cam_cmd_csiphy_info->combo_mode;
+ if (cam_cmd_csiphy_info->combo_mode == 1)
+ csiphy_dev->csiphy_info.settle_time_combo_sensor =
+ cam_cmd_csiphy_info->settle_time;
+ else
+ csiphy_dev->csiphy_info.settle_time =
+ cam_cmd_csiphy_info->settle_time;
+ csiphy_dev->csiphy_info.data_rate = cam_cmd_csiphy_info->data_rate;
+
+ if (cam_cmd_csiphy_info->secure_mode == 1)
+ cam_csiphy_update_secure_info(csiphy_dev,
+ cam_cmd_csiphy_info, cfg_dev);
+
+ return rc;
+}
+
+/*
+ * cam_csiphy_cphy_irq_config() - program the chip-specific IRQ enable table
+ * (csiphy_irq_reg) into the PHY's register space.
+ */
+void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev)
+{
+ int32_t i;
+ void __iomem *csiphybase =
+ csiphy_dev->soc_info.reg_map[0].mem_base;
+
+ for (i = 0; i < csiphy_dev->num_irq_registers; i++)
+ cam_io_w_mb(
+ csiphy_dev->ctrl_reg->csiphy_irq_reg[i].reg_data,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_irq_reg[i].reg_addr);
+}
+
+/*
+ * cam_csiphy_cphy_irq_disable() - mask all PHY interrupts by writing zero
+ * to every register listed in the csiphy_irq_reg table.
+ */
+void cam_csiphy_cphy_irq_disable(struct csiphy_device *csiphy_dev)
+{
+ int32_t i;
+ void __iomem *csiphybase =
+ csiphy_dev->soc_info.reg_map[0].mem_base;
+
+ for (i = 0; i < csiphy_dev->num_irq_registers; i++)
+ cam_io_w_mb(0x0, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_irq_reg[i].reg_addr);
+}
+
+/*
+ * cam_csiphy_irq() - CSIPHY interrupt handler.  Reads each interrupt status
+ * register, acknowledges it by echoing the status into the matching clear
+ * register, rate-limited-logs it, then zeroes the clear register and pulses
+ * the global IRQ command register (1 then 0) to re-arm the block.
+ */
+irqreturn_t cam_csiphy_irq(int irq_num, void *data)
+{
+ uint32_t irq;
+ uint8_t i;
+ struct csiphy_device *csiphy_dev =
+ (struct csiphy_device *)data;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct csiphy_reg_parms_t *csiphy_reg = NULL;
+ void __iomem *base = NULL;
+
+ if (!csiphy_dev) {
+ CAM_ERR(CAM_CSIPHY, "Invalid Args");
+ return IRQ_NONE;
+ }
+
+ soc_info = &csiphy_dev->soc_info;
+ base = csiphy_dev->soc_info.reg_map[0].mem_base;
+ csiphy_reg = &csiphy_dev->ctrl_reg->csiphy_reg;
+
+ for (i = 0; i < csiphy_dev->num_irq_registers; i++) {
+ irq = cam_io_r(base +
+ csiphy_reg->mipi_csiphy_interrupt_status0_addr +
+ (0x4 * i));
+ cam_io_w_mb(irq, base +
+ csiphy_reg->mipi_csiphy_interrupt_clear0_addr +
+ (0x4 * i));
+ CAM_ERR_RATE_LIMIT(CAM_CSIPHY,
+ "CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x",
+ soc_info->index, i, irq);
+ cam_io_w_mb(0x0, base +
+ csiphy_reg->mipi_csiphy_interrupt_clear0_addr +
+ (0x4 * i));
+ }
+ cam_io_w_mb(0x1, base + csiphy_reg->mipi_csiphy_glbl_irq_cmd_addr);
+ cam_io_w_mb(0x0, base + csiphy_reg->mipi_csiphy_glbl_irq_cmd_addr);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * cam_csiphy_config_dev() - program the PHY lane registers for the current
+ * csiphy_info (accumulated by cam_cmd_buf_parser()).
+ *
+ * Selects the 2-phase (DPHY) or 3-phase (CPHY) register table, derives the
+ * lane-enable bit pattern from the lane mask, writes the common register
+ * table, then walks the per-lane settings table for each enabled lane,
+ * patching in the settle count, and finally enables PHY interrupts.
+ *
+ * Return: 0 on success, -EINVAL when the register base is missing.
+ */
+int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev)
+{
+ int32_t rc = 0;
+ uint32_t lane_enable = 0, mask = 1, size = 0;
+ uint16_t lane_mask = 0, i = 0, cfg_size = 0, temp = 0;
+ uint8_t lane_cnt, lane_pos = 0;
+ uint16_t settle_cnt = 0;
+ void __iomem *csiphybase;
+ struct csiphy_reg_t *csiphy_common_reg = NULL;
+ struct csiphy_reg_t (*reg_array)[MAX_SETTINGS_PER_LANE];
+
+ lane_cnt = csiphy_dev->csiphy_info.lane_cnt;
+ csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base;
+
+ if (!csiphybase) {
+ CAM_ERR(CAM_CSIPHY, "csiphybase NULL");
+ return -EINVAL;
+ }
+
+ if (!csiphy_dev->csiphy_info.csiphy_3phase) {
+ if (csiphy_dev->csiphy_info.combo_mode == 1)
+ reg_array =
+ csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg;
+ else
+ reg_array =
+ csiphy_dev->ctrl_reg->csiphy_2ph_reg;
+ csiphy_dev->num_irq_registers = 11;
+ cfg_size =
+ csiphy_dev->ctrl_reg->csiphy_reg.csiphy_2ph_config_array_size;
+
+ /*
+ * Build the DPHY lane-enable pattern.  mask == 0x2 is the clock
+ * lane: it maps to bit 7 and does not advance the data-lane
+ * position (hence the i--).
+ */
+ lane_mask = csiphy_dev->csiphy_info.lane_mask & LANE_MASK_2PH;
+ for (i = 0; i < MAX_DPHY_DATA_LN; i++) {
+ if (mask == 0x2) {
+ if (lane_mask & mask)
+ lane_enable |= 0x80;
+ i--;
+ } else if (lane_mask & mask) {
+ lane_enable |= 0x1 << (i<<1);
+ }
+ mask <<= 1;
+ }
+ } else {
+ if (csiphy_dev->csiphy_info.combo_mode == 1) {
+ if (csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg)
+ reg_array =
+ csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg;
+ else {
+ reg_array =
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg;
+ CAM_ERR(CAM_CSIPHY,
+ "Unsupported configuration, Falling back to CPHY mode");
+ }
+ } else
+ reg_array =
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg;
+ csiphy_dev->num_irq_registers = 11;
+ cfg_size =
+ csiphy_dev->ctrl_reg->csiphy_reg.csiphy_3ph_config_array_size;
+
+ /* CPHY: each set lane bit maps to an odd bit position. */
+ lane_mask = csiphy_dev->csiphy_info.lane_mask & LANE_MASK_3PH;
+ mask = lane_mask;
+ while (mask != 0) {
+ temp = (i << 1)+1;
+ lane_enable |= ((mask & 0x1) << temp);
+ mask >>= 1;
+ i++;
+ }
+ }
+
+ size = csiphy_dev->ctrl_reg->csiphy_reg.csiphy_common_array_size;
+
+ /* Common register table: dispatch on the entry's parameter type. */
+ for (i = 0; i < size; i++) {
+ csiphy_common_reg = &csiphy_dev->ctrl_reg->csiphy_common_reg[i];
+ switch (csiphy_common_reg->csiphy_param_type) {
+ case CSIPHY_LANE_ENABLE:
+ cam_io_w_mb(lane_enable,
+ csiphybase + csiphy_common_reg->reg_addr);
+ usleep_range(csiphy_common_reg->delay * 1000,
+ csiphy_common_reg->delay * 1000 + 10);
+ break;
+ case CSIPHY_DEFAULT_PARAMS:
+ cam_io_w_mb(csiphy_common_reg->reg_data,
+ csiphybase + csiphy_common_reg->reg_addr);
+ usleep_range(csiphy_common_reg->delay * 1000,
+ csiphy_common_reg->delay * 1000 + 10);
+ break;
+ case CSIPHY_2PH_REGS:
+ if (!csiphy_dev->csiphy_info.csiphy_3phase) {
+ cam_io_w_mb(csiphy_common_reg->reg_data,
+ csiphybase +
+ csiphy_common_reg->reg_addr);
+ usleep_range(csiphy_common_reg->delay * 1000,
+ csiphy_common_reg->delay * 1000 + 10);
+ }
+ break;
+ case CSIPHY_3PH_REGS:
+ if (csiphy_dev->csiphy_info.csiphy_3phase) {
+ cam_io_w_mb(csiphy_common_reg->reg_data,
+ csiphybase +
+ csiphy_common_reg->reg_addr);
+ usleep_range(csiphy_common_reg->delay * 1000,
+ csiphy_common_reg->delay * 1000 + 10);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Per-lane programming: walk the mask, one settings row per lane. */
+ while (lane_mask) {
+ if (!(lane_mask & 0x1)) {
+ lane_pos++;
+ lane_mask >>= 1;
+ continue;
+ }
+
+ /* settle_time is divided by a 200 MHz reference to get counts. */
+ settle_cnt = (csiphy_dev->csiphy_info.settle_time / 200000000);
+ if (csiphy_dev->csiphy_info.combo_mode == 1 &&
+ (lane_pos >= 3))
+ settle_cnt =
+ (csiphy_dev->csiphy_info.settle_time_combo_sensor /
+ 200000000);
+ for (i = 0; i < cfg_size; i++) {
+ switch (reg_array[lane_pos][i].csiphy_param_type) {
+ case CSIPHY_LANE_ENABLE:
+ cam_io_w_mb(lane_enable,
+ csiphybase +
+ reg_array[lane_pos][i].reg_addr);
+ break;
+ case CSIPHY_DEFAULT_PARAMS:
+ cam_io_w_mb(reg_array[lane_pos][i].reg_data,
+ csiphybase +
+ reg_array[lane_pos][i].reg_addr);
+ break;
+ case CSIPHY_SETTLE_CNT_LOWER_BYTE:
+ cam_io_w_mb(settle_cnt & 0xFF,
+ csiphybase +
+ reg_array[lane_pos][i].reg_addr);
+ break;
+ case CSIPHY_SETTLE_CNT_HIGHER_BYTE:
+ cam_io_w_mb((settle_cnt >> 8) & 0xFF,
+ csiphybase +
+ reg_array[lane_pos][i].reg_addr);
+ break;
+ default:
+ CAM_DBG(CAM_CSIPHY, "Do Nothing");
+ break;
+ }
+ if (reg_array[lane_pos][i].delay > 0) {
+ usleep_range(reg_array[lane_pos][i].delay*1000,
+ reg_array[lane_pos][i].delay*1000 + 10);
+ }
+ }
+ lane_mask >>= 1;
+ lane_pos++;
+ }
+
+ cam_csiphy_cphy_irq_config(csiphy_dev);
+
+ return rc;
+}
+
+/*
+ * cam_csiphy_shutdown() - force the PHY back to CAM_CSIPHY_INIT regardless
+ * of current state.  Walks the state machine down: START (unprotect secure
+ * lanes, reset PHY, drop resources and CPAS) falls through into ACQUIRE
+ * (destroy device handles, reset bridge bookkeeping), then all counters are
+ * zeroed.  A device already in INIT is left untouched.
+ */
+void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev)
+{
+ struct cam_hw_soc_info *soc_info;
+ int32_t i = 0;
+
+ if (csiphy_dev->csiphy_state == CAM_CSIPHY_INIT)
+ return;
+
+ if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
+ soc_info = &csiphy_dev->soc_info;
+
+ for (i = 0; i < csiphy_dev->acquire_count; i++) {
+ if (csiphy_dev->csiphy_info.secure_mode[i])
+ cam_csiphy_notify_secure_mode(
+ csiphy_dev,
+ CAM_SECURE_MODE_NON_SECURE, i);
+
+ csiphy_dev->csiphy_info.secure_mode[i] =
+ CAM_SECURE_MODE_NON_SECURE;
+
+ csiphy_dev->csiphy_cpas_cp_reg_mask[i] = 0;
+ }
+
+ cam_csiphy_reset(csiphy_dev);
+ cam_soc_util_disable_platform_resource(soc_info, true, true);
+
+ cam_cpas_stop(csiphy_dev->cpas_handle);
+ /* Fall through to the ACQUIRE teardown below. */
+ csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE;
+ }
+
+ if (csiphy_dev->csiphy_state == CAM_CSIPHY_ACQUIRE) {
+ if (csiphy_dev->bridge_intf.device_hdl[0] != -1)
+ cam_destroy_device_hdl(
+ csiphy_dev->bridge_intf.device_hdl[0]);
+ if (csiphy_dev->bridge_intf.device_hdl[1] != -1)
+ cam_destroy_device_hdl(
+ csiphy_dev->bridge_intf.device_hdl[1]);
+ csiphy_dev->bridge_intf.device_hdl[0] = -1;
+ csiphy_dev->bridge_intf.device_hdl[1] = -1;
+ csiphy_dev->bridge_intf.link_hdl[0] = -1;
+ csiphy_dev->bridge_intf.link_hdl[1] = -1;
+ csiphy_dev->bridge_intf.session_hdl[0] = -1;
+ csiphy_dev->bridge_intf.session_hdl[1] = -1;
+ }
+
+ csiphy_dev->ref_count = 0;
+ csiphy_dev->is_acquired_dev_combo_mode = 0;
+ csiphy_dev->acquire_count = 0;
+ csiphy_dev->start_dev_count = 0;
+ csiphy_dev->csiphy_state = CAM_CSIPHY_INIT;
+}
+
+/**
+ * cam_csiphy_external_cmd() - handle a CAM_CONFIG_DEV_EXTERNAL payload
+ * @csiphy_dev:   CSIPhy device whose cached csiphy_info is updated
+ * @p_submit_cmd: command whose packet_handle is a userspace pointer to
+ *                a struct cam_csiphy_info
+ *
+ * Copies the PHY configuration supplied by userspace and caches it in
+ * csiphy_dev->csiphy_info for the subsequent start sequence.
+ *
+ * Return: 0 on success, -EFAULT if the user copy fails.
+ */
+static int32_t cam_csiphy_external_cmd(struct csiphy_device *csiphy_dev,
+	struct cam_config_dev_cmd *p_submit_cmd)
+{
+	struct cam_csiphy_info cam_cmd_csiphy_info;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cam_cmd_csiphy_info,
+		u64_to_user_ptr(p_submit_cmd->packet_handle),
+		sizeof(struct cam_csiphy_info))) {
+		CAM_ERR(CAM_CSIPHY, "failed to copy cam_csiphy_info\n");
+		rc = -EFAULT;
+	} else {
+		/* Cache the user-supplied PHY parameters.  (The original
+		 * code assigned lane_cnt twice; the duplicate assignment
+		 * has been removed.)
+		 */
+		csiphy_dev->csiphy_info.lane_cnt =
+			cam_cmd_csiphy_info.lane_cnt;
+		csiphy_dev->csiphy_info.lane_mask =
+			cam_cmd_csiphy_info.lane_mask;
+		csiphy_dev->csiphy_info.csiphy_3phase =
+			cam_cmd_csiphy_info.csiphy_3phase;
+		csiphy_dev->csiphy_info.combo_mode =
+			cam_cmd_csiphy_info.combo_mode;
+		csiphy_dev->csiphy_info.settle_time =
+			cam_cmd_csiphy_info.settle_time;
+		csiphy_dev->csiphy_info.data_rate =
+			cam_cmd_csiphy_info.data_rate;
+		CAM_DBG(CAM_CSIPHY,
+			"%s CONFIG_DEV_EXT settle_time= %lld lane_cnt=%d lane_mask=0x%x",
+			__func__,
+			csiphy_dev->csiphy_info.settle_time,
+			csiphy_dev->csiphy_info.lane_cnt,
+			csiphy_dev->csiphy_info.lane_mask);
+	}
+
+	return rc;
+}
+
+/**
+ * cam_csiphy_core_cfg() - top-level ioctl handler for the CSIPhy subdev
+ * @phy_dev: opaque pointer to the struct csiphy_device instance
+ * @arg:     pointer to the struct cam_control received from userspace
+ *
+ * Dispatches CAM_ACQUIRE_DEV / CAM_QUERY_CAP / CAM_STOP_DEV /
+ * CAM_RELEASE_DEV / CAM_CONFIG_DEV / CAM_START_DEV /
+ * CAM_CONFIG_DEV_EXTERNAL while holding the device mutex.
+ *
+ * Fixes relative to the original:
+ *  - copy_from_user() returns the number of bytes NOT copied (an
+ *    unsigned count, never negative), so the old "rc < 0" checks could
+ *    never fire; non-zero results are now treated as -EFAULT.
+ *  - the CAM_ACQUIRE_DEV info copy failure used to jump out without
+ *    setting rc, silently returning success.
+ *  - CAM_STOP_DEV declared an inner "rc" that shadowed the function's
+ *    return code, so every STOP failure returned 0; error paths there
+ *    now propagate -EFAULT/-EINVAL.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int32_t cam_csiphy_core_cfg(void *phy_dev,
+	void *arg)
+{
+	struct csiphy_device *csiphy_dev =
+		(struct csiphy_device *)phy_dev;
+	struct intf_params *bridge_intf = NULL;
+	struct cam_control *cmd = (struct cam_control *)arg;
+	int32_t rc = 0;
+
+	if (!csiphy_dev || !cmd) {
+		CAM_ERR(CAM_CSIPHY, "Invalid input args");
+		return -EINVAL;
+	}
+
+	if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+		CAM_ERR(CAM_CSIPHY, "Invalid handle type: %d",
+			cmd->handle_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_CSIPHY, "Opcode received: %d", cmd->op_code);
+	mutex_lock(&csiphy_dev->mutex);
+	switch (cmd->op_code) {
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev csiphy_acq_dev;
+		struct cam_csiphy_acquire_dev_info csiphy_acq_params;
+
+		struct cam_create_dev_hdl bridge_params;
+
+		if (copy_from_user(&csiphy_acq_dev,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(csiphy_acq_dev))) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		csiphy_acq_params.combo_mode = 0;
+
+		if (copy_from_user(&csiphy_acq_params,
+			u64_to_user_ptr(csiphy_acq_dev.info_handle),
+			sizeof(csiphy_acq_params))) {
+			CAM_ERR(CAM_CSIPHY,
+				"Failed copying from User");
+			/* Previously fell out with rc == 0. */
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		/* At most one normal + one combo-mode acquire. */
+		if (csiphy_dev->acquire_count == 2) {
+			CAM_ERR(CAM_CSIPHY,
+				"CSIPHY device do not allow more than 2 acquires");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if ((csiphy_acq_params.combo_mode == 1) &&
+			(csiphy_dev->is_acquired_dev_combo_mode == 1)) {
+			CAM_ERR(CAM_CSIPHY,
+				"Multiple Combo Acq are not allowed: cm: %d, acm: %d",
+				csiphy_acq_params.combo_mode,
+				csiphy_dev->is_acquired_dev_combo_mode);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if ((csiphy_acq_params.combo_mode != 1) &&
+			(csiphy_dev->is_acquired_dev_combo_mode != 1) &&
+			(csiphy_dev->acquire_count == 1)) {
+			CAM_ERR(CAM_CSIPHY,
+				"Multiple Acquires are not allowed cm: %d acm: %d",
+				csiphy_acq_params.combo_mode,
+				csiphy_dev->is_acquired_dev_combo_mode);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		bridge_params.ops = NULL;
+		bridge_params.session_hdl = csiphy_acq_dev.session_handle;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = csiphy_dev;
+
+		/* combo_mode is used as an index into 2-entry arrays. */
+		if (csiphy_acq_params.combo_mode >= 2) {
+			CAM_ERR(CAM_CSIPHY, "Invalid combo_mode %d",
+				csiphy_acq_params.combo_mode);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		csiphy_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		bridge_intf = &csiphy_dev->bridge_intf;
+		bridge_intf->device_hdl[csiphy_acq_params.combo_mode]
+			= csiphy_acq_dev.device_handle;
+		bridge_intf->session_hdl[csiphy_acq_params.combo_mode] =
+			csiphy_acq_dev.session_handle;
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&csiphy_acq_dev,
+			sizeof(struct cam_sensor_acquire_dev))) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		if (csiphy_acq_params.combo_mode == 1)
+			csiphy_dev->is_acquired_dev_combo_mode = 1;
+
+		csiphy_dev->acquire_count++;
+		csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE;
+	}
+		break;
+	case CAM_QUERY_CAP: {
+		struct cam_csiphy_query_cap csiphy_cap = {0};
+
+		cam_csiphy_query_cap(csiphy_dev, &csiphy_cap);
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&csiphy_cap, sizeof(struct cam_csiphy_query_cap))) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_STOP_DEV: {
+		int32_t offset;
+		struct cam_start_stop_dev_cmd config;
+
+		if (copy_from_user(&config, (void __user *)cmd->handle,
+			sizeof(config))) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		if ((csiphy_dev->csiphy_state != CAM_CSIPHY_START) ||
+			!csiphy_dev->start_dev_count) {
+			CAM_ERR(CAM_CSIPHY, "Not in right state to stop : %d",
+				csiphy_dev->csiphy_state);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		offset = cam_csiphy_get_instance_offset(csiphy_dev,
+			config.dev_handle);
+		if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+			CAM_ERR(CAM_CSIPHY, "Invalid offset");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		/* More users left: only drop this instance's secure mode. */
+		if (--csiphy_dev->start_dev_count) {
+			CAM_DBG(CAM_CSIPHY, "Stop Dev ref Cnt: %d",
+				csiphy_dev->start_dev_count);
+			if (csiphy_dev->csiphy_info.secure_mode[offset])
+				cam_csiphy_notify_secure_mode(
+					csiphy_dev,
+					CAM_SECURE_MODE_NON_SECURE, offset);
+
+			csiphy_dev->csiphy_info.secure_mode[offset] =
+				CAM_SECURE_MODE_NON_SECURE;
+			csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0;
+
+			goto release_mutex;
+		}
+
+		/* Last user: tear the hardware down completely. */
+		if (csiphy_dev->csiphy_info.secure_mode[offset])
+			cam_csiphy_notify_secure_mode(
+				csiphy_dev,
+				CAM_SECURE_MODE_NON_SECURE, offset);
+
+		csiphy_dev->csiphy_info.secure_mode[offset] =
+			CAM_SECURE_MODE_NON_SECURE;
+
+		csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0x0;
+
+		rc = cam_csiphy_disable_hw(csiphy_dev);
+		if (rc < 0)
+			CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
+
+		rc = cam_cpas_stop(csiphy_dev->cpas_handle);
+		if (rc < 0)
+			CAM_ERR(CAM_CSIPHY, "de-voting CPAS: %d", rc);
+
+		csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE;
+	}
+		break;
+	case CAM_RELEASE_DEV: {
+		struct cam_release_dev_cmd release;
+
+		if (!csiphy_dev->acquire_count) {
+			CAM_ERR(CAM_CSIPHY, "No valid devices to release");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if (copy_from_user(&release,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(release))) {
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		rc = cam_destroy_device_hdl(release.dev_handle);
+		if (rc < 0)
+			CAM_ERR(CAM_CSIPHY, "destroying the device hdl");
+		/* NOTE(review): a handle matching neither slot clears
+		 * slot 1 anyway; preserved as-is pending confirmation.
+		 */
+		if (release.dev_handle ==
+			csiphy_dev->bridge_intf.device_hdl[0]) {
+			csiphy_dev->bridge_intf.device_hdl[0] = -1;
+			csiphy_dev->bridge_intf.link_hdl[0] = -1;
+			csiphy_dev->bridge_intf.session_hdl[0] = -1;
+		} else {
+			csiphy_dev->bridge_intf.device_hdl[1] = -1;
+			csiphy_dev->bridge_intf.link_hdl[1] = -1;
+			csiphy_dev->bridge_intf.session_hdl[1] = -1;
+			csiphy_dev->is_acquired_dev_combo_mode = 0;
+		}
+
+		csiphy_dev->config_count--;
+		csiphy_dev->acquire_count--;
+
+		if (csiphy_dev->acquire_count == 0)
+			csiphy_dev->csiphy_state = CAM_CSIPHY_INIT;
+
+		if (csiphy_dev->config_count == 0) {
+			CAM_DBG(CAM_CSIPHY, "reset csiphy_info");
+			csiphy_dev->csiphy_info.lane_mask = 0;
+			csiphy_dev->csiphy_info.lane_cnt = 0;
+			csiphy_dev->csiphy_info.combo_mode = 0;
+		}
+	}
+		break;
+	case CAM_CONFIG_DEV: {
+		struct cam_config_dev_cmd config;
+
+		if (copy_from_user(&config,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(config))) {
+			rc = -EFAULT;
+		} else {
+			rc = cam_cmd_buf_parser(csiphy_dev, &config);
+			if (rc < 0) {
+				CAM_ERR(CAM_CSIPHY, "Fail in cmd buf parser");
+				goto release_mutex;
+			}
+		}
+		break;
+	}
+	case CAM_START_DEV: {
+		struct cam_ahb_vote ahb_vote;
+		struct cam_axi_vote axi_vote;
+		struct cam_start_stop_dev_cmd config;
+		int32_t offset;
+
+		if (copy_from_user(&config, (void __user *)cmd->handle,
+			sizeof(config))) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		/* Already started: just bump the refcount. */
+		if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
+			csiphy_dev->start_dev_count++;
+			goto release_mutex;
+		}
+
+		offset = cam_csiphy_get_instance_offset(csiphy_dev,
+			config.dev_handle);
+		if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+			CAM_ERR(CAM_CSIPHY, "Invalid offset");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		ahb_vote.type = CAM_VOTE_ABSOLUTE;
+		ahb_vote.vote.level = CAM_SVS_VOTE;
+		axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+		rc = cam_cpas_start(csiphy_dev->cpas_handle,
+			&ahb_vote, &axi_vote);
+		if (rc < 0) {
+			CAM_ERR(CAM_CSIPHY, "voting CPAS: %d", rc);
+			goto release_mutex;
+		}
+
+		if (csiphy_dev->csiphy_info.secure_mode[offset] == 1) {
+			rc = cam_csiphy_notify_secure_mode(
+				csiphy_dev,
+				CAM_SECURE_MODE_SECURE, offset);
+			if (rc < 0) {
+				csiphy_dev->csiphy_info.secure_mode[offset] =
+					CAM_SECURE_MODE_NON_SECURE;
+				goto release_mutex;
+			}
+		}
+
+		rc = cam_csiphy_enable_hw(csiphy_dev);
+		if (rc != 0) {
+			CAM_ERR(CAM_CSIPHY, "cam_csiphy_enable_hw failed");
+			cam_cpas_stop(csiphy_dev->cpas_handle);
+			goto release_mutex;
+		}
+		rc = cam_csiphy_config_dev(csiphy_dev);
+		if (csiphy_dump == 1)
+			cam_csiphy_mem_dmp(&csiphy_dev->soc_info);
+
+		if (rc < 0) {
+			CAM_ERR(CAM_CSIPHY, "cam_csiphy_config_dev failed");
+			cam_csiphy_disable_hw(csiphy_dev);
+			cam_cpas_stop(csiphy_dev->cpas_handle);
+			goto release_mutex;
+		}
+		csiphy_dev->start_dev_count++;
+		csiphy_dev->csiphy_state = CAM_CSIPHY_START;
+	}
+		break;
+	case CAM_CONFIG_DEV_EXTERNAL: {
+		struct cam_config_dev_cmd submit_cmd;
+
+		if (copy_from_user(&submit_cmd,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(struct cam_config_dev_cmd))) {
+			CAM_ERR(CAM_CSIPHY, "failed copy config ext\n");
+			rc = -EFAULT;
+		} else {
+			rc = cam_csiphy_external_cmd(csiphy_dev, &submit_cmd);
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CSIPHY, "Invalid Opcode: %d", cmd->op_code);
+		rc = -EINVAL;
+		goto release_mutex;
+	}
+
+release_mutex:
+	mutex_unlock(&csiphy_dev->mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.h
new file mode 100644
index 000000000000..91cbfadb7d06
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSIPHY_CORE_H_
+#define _CAM_CSIPHY_CORE_H_
+
+#include <linux/irqreturn.h>
+#include "cam_csiphy_dev.h"
+#include <cam_mem_mgr.h>
+#include <cam_req_mgr_util.h>
+#include <cam_io_util.h>
+
+/**
+ * cam_csiphy_cphy_irq_config() - program the CSIPhy IRQ registers
+ * @csiphy_dev: CSIPhy device structure
+ */
+void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev);
+
+/**
+ * cam_csiphy_reset() - reset the CSIPhy hardware
+ * @csiphy_dev: CSIPhy device structure
+ */
+void cam_csiphy_reset(struct csiphy_device *csiphy_dev);
+
+/**
+ * cam_csiphy_core_cfg() - handle a camera control command for the CSIPhy
+ * @csiphy_dev: CSIPhy device structure (opaque void pointer)
+ * @arg:        struct cam_control command argument
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_csiphy_core_cfg(void *csiphy_dev, void *arg);
+
+/**
+ * cam_csiphy_irq() - CSIPhy interrupt handler
+ * @irq_num: IRQ number
+ * @data:    CSIPhy device structure
+ */
+irqreturn_t cam_csiphy_irq(int irq_num, void *data);
+
+/**
+ * cam_csiphy_shutdown() - tear the CSIPhy down on device close
+ * @csiphy_dev: CSIPhy device structure
+ */
+void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev);
+
+#endif /* _CAM_CSIPHY_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
new file mode 100644
index 000000000000..32bb34bb257b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -0,0 +1,253 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_csiphy_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_csiphy_soc.h"
+#include "cam_csiphy_core.h"
+#include <media/cam_sensor.h>
+
+/**
+ * cam_csiphy_subdev_ioctl() - native (64-bit) subdev ioctl entry point
+ * @sd:  v4l2 subdev backing this CSIPhy
+ * @cmd: ioctl command code
+ * @arg: in-kernel pointer to the struct cam_control payload
+ *
+ * Only VIDIOC_CAM_CONTROL is supported; it is forwarded to
+ * cam_csiphy_core_cfg().  Unknown commands now return -ENOIOCTLCMD
+ * instead of silently reporting success (rc stayed 0 in the original).
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static long cam_csiphy_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	struct csiphy_device *csiphy_dev = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_csiphy_core_cfg(csiphy_dev, arg);
+		if (rc != 0) {
+			CAM_ERR(CAM_CSIPHY, "in configuring the device");
+			return rc;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_CSIPHY, "Wrong ioctl : %d", cmd);
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+
+	return rc;
+}
+
+/* v4l2 subdev close hook: shut the PHY down under the device lock. */
+static int cam_csiphy_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
+
+	if (!csiphy) {
+		CAM_ERR(CAM_CSIPHY, "csiphy_dev ptr is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&csiphy->mutex);
+	cam_csiphy_shutdown(csiphy);
+	mutex_unlock(&csiphy->mutex);
+
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * 32-bit compat ioctl entry: bounce the cam_control structure through a
+ * kernel-side copy, dispatch via the native handler, and copy the
+ * (possibly updated) structure back on success.
+ */
+static long cam_csiphy_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_CSIPHY, "Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	/* All arguments travel in the 64-bit cam_control layout and are
+	 * handed to the core handler unchanged.
+	 */
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_csiphy_subdev_ioctl(sd, cmd, &cmd_data);
+		break;
+	default:
+		CAM_ERR(CAM_CSIPHY, "Invalid compat ioctl cmd: %d", cmd);
+		rc = -EINVAL;
+	}
+
+	if (rc)
+		return rc;
+
+	if (copy_to_user((void __user *)arg, &cmd_data,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_CSIPHY,
+			"Failed to copy to user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+#endif
+
+/* Core ops: native ioctl plus the 32-bit compat shim when enabled. */
+static struct v4l2_subdev_core_ops csiphy_subdev_core_ops = {
+ .ioctl = cam_csiphy_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = cam_csiphy_subdev_compat_ioctl,
+#endif
+};
+
+static const struct v4l2_subdev_ops csiphy_subdev_ops = {
+ .core = &csiphy_subdev_core_ops,
+};
+
+/* Internal ops: .close tears the PHY down when userspace drops the fd. */
+static const struct v4l2_subdev_internal_ops csiphy_subdev_intern_ops = {
+ .close = cam_csiphy_subdev_close,
+};
+
+/**
+ * cam_csiphy_platform_probe() - bind one CSIPhy platform device
+ * @pdev: platform device handed in by the driver core
+ *
+ * Allocates the per-instance csiphy_device, parses DT, registers the
+ * v4l2 subdev and finally registers with CPAS.
+ *
+ * Fix: a CPAS registration failure used to free new_csiphy_dev while
+ * the just-registered subdev (and pdev drvdata) still pointed at it;
+ * the subdev is now unregistered before cleanup on that path.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int32_t cam_csiphy_platform_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_register_params cpas_parms;
+	struct csiphy_device *new_csiphy_dev;
+	int32_t rc = 0;
+
+	new_csiphy_dev = devm_kzalloc(&pdev->dev,
+		sizeof(struct csiphy_device), GFP_KERNEL);
+	if (!new_csiphy_dev)
+		return -ENOMEM;
+
+	new_csiphy_dev->ctrl_reg = kzalloc(sizeof(struct csiphy_ctrl_t),
+		GFP_KERNEL);
+	if (!new_csiphy_dev->ctrl_reg) {
+		devm_kfree(&pdev->dev, new_csiphy_dev);
+		return -ENOMEM;
+	}
+
+	mutex_init(&new_csiphy_dev->mutex);
+	new_csiphy_dev->v4l2_dev_str.pdev = pdev;
+
+	new_csiphy_dev->soc_info.pdev = pdev;
+	new_csiphy_dev->soc_info.dev = &pdev->dev;
+	new_csiphy_dev->soc_info.dev_name = pdev->name;
+	new_csiphy_dev->ref_count = 0;
+
+	rc = cam_csiphy_parse_dt_info(pdev, new_csiphy_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_CSIPHY, "DT parsing failed: %d", rc);
+		goto csiphy_no_resource;
+	}
+
+	/* Populate the cam_subdev wrapper before registration. */
+	new_csiphy_dev->v4l2_dev_str.internal_ops =
+		&csiphy_subdev_intern_ops;
+	new_csiphy_dev->v4l2_dev_str.ops =
+		&csiphy_subdev_ops;
+	strlcpy(new_csiphy_dev->device_name, CAMX_CSIPHY_DEV_NAME,
+		sizeof(new_csiphy_dev->device_name));
+	new_csiphy_dev->v4l2_dev_str.name =
+		new_csiphy_dev->device_name;
+	new_csiphy_dev->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	new_csiphy_dev->v4l2_dev_str.ent_function =
+		CAM_CSIPHY_DEVICE_TYPE;
+	new_csiphy_dev->v4l2_dev_str.token =
+		new_csiphy_dev;
+
+	rc = cam_register_subdev(&(new_csiphy_dev->v4l2_dev_str));
+	if (rc < 0) {
+		CAM_ERR(CAM_CSIPHY, "cam_register_subdev Failed rc: %d", rc);
+		goto csiphy_no_resource;
+	}
+
+	platform_set_drvdata(pdev, &(new_csiphy_dev->v4l2_dev_str.sd));
+	v4l2_set_subdevdata(&(new_csiphy_dev->v4l2_dev_str.sd), new_csiphy_dev);
+
+	/* Bridge handles start at the -1 "unused" sentinel. */
+	new_csiphy_dev->bridge_intf.device_hdl[0] = -1;
+	new_csiphy_dev->bridge_intf.device_hdl[1] = -1;
+	new_csiphy_dev->bridge_intf.ops.get_dev_info = NULL;
+	new_csiphy_dev->bridge_intf.ops.link_setup = NULL;
+	new_csiphy_dev->bridge_intf.ops.apply_req = NULL;
+
+	new_csiphy_dev->acquire_count = 0;
+	new_csiphy_dev->start_dev_count = 0;
+	new_csiphy_dev->is_acquired_dev_combo_mode = 0;
+
+	cpas_parms.cam_cpas_client_cb = NULL;
+	cpas_parms.cell_index = new_csiphy_dev->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = new_csiphy_dev;
+
+	strlcpy(cpas_parms.identifier, "csiphy", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		CAM_ERR(CAM_CSIPHY, "CPAS registration failed rc: %d", rc);
+		goto csiphy_unregister_subdev;
+	}
+	CAM_DBG(CAM_CSIPHY, "CPAS registration successful handle=%d",
+		cpas_parms.client_handle);
+	new_csiphy_dev->cpas_handle = cpas_parms.client_handle;
+
+	return rc;
+
+csiphy_unregister_subdev:
+	/* Subdev was registered above and holds a pointer to memory we
+	 * are about to free; unregister it first to avoid a dangling
+	 * drvdata/subdev reference.
+	 */
+	cam_unregister_subdev(&(new_csiphy_dev->v4l2_dev_str));
+csiphy_no_resource:
+	mutex_destroy(&new_csiphy_dev->mutex);
+	kfree(new_csiphy_dev->ctrl_reg);
+	devm_kfree(&pdev->dev, new_csiphy_dev);
+	return rc;
+}
+
+
+/*
+ * cam_csiphy_device_remove() - unbind the CSIPhy platform device.
+ *
+ * Unregisters from CPAS, releases SOC resources and frees the per-instance
+ * allocations made in probe.
+ *
+ * NOTE(review): the v4l2 subdev registered in probe is not unregistered
+ * here and csiphy_dev->mutex is not destroyed — confirm whether
+ * cam_unregister_subdev()/mutex_destroy() should be added.
+ */
+static int32_t cam_csiphy_device_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev =
+ platform_get_drvdata(pdev);
+ struct csiphy_device *csiphy_dev =
+ v4l2_get_subdevdata(subdev);
+
+ cam_cpas_unregister_client(csiphy_dev->cpas_handle);
+ cam_csiphy_soc_release(csiphy_dev);
+ kfree(csiphy_dev->ctrl_reg);
+ devm_kfree(&pdev->dev, csiphy_dev);
+
+ return 0;
+}
+
+/* Device-tree match table: binds to "qcom,csiphy" nodes. */
+static const struct of_device_id cam_csiphy_dt_match[] = {
+ {.compatible = "qcom,csiphy"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, cam_csiphy_dt_match);
+
+static struct platform_driver csiphy_driver = {
+ .probe = cam_csiphy_platform_probe,
+ .remove = cam_csiphy_device_remove,
+ .driver = {
+ .name = CAMX_CSIPHY_DEV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cam_csiphy_dt_match,
+ /* Disallow manual bind/unbind via sysfs. */
+ .suppress_bind_attrs = true,
+ },
+};
+
+/* Module entry: register the CSIPhy platform driver. */
+static int32_t __init cam_csiphy_init_module(void)
+{
+ return platform_driver_register(&csiphy_driver);
+}
+
+/* Module exit: unregister the CSIPhy platform driver. */
+static void __exit cam_csiphy_exit_module(void)
+{
+ platform_driver_unregister(&csiphy_driver);
+}
+
+module_init(cam_csiphy_init_module);
+module_exit(cam_csiphy_exit_module);
+MODULE_DESCRIPTION("CAM CSIPHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
new file mode 100644
index 000000000000..248903251d62
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -0,0 +1,263 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSIPHY_DEV_H_
+#define _CAM_CSIPHY_DEV_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_defs.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_subdev.h>
+#include <cam_io_util.h>
+#include <cam_cpas_api.h>
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+#define MAX_CSIPHY 3
+#define MAX_DPHY_DATA_LN 4
+#define MAX_LRME_V4l2_EVENTS 30
+#define CSIPHY_NUM_CLK_MAX 16
+#define MAX_CSIPHY_REG_ARRAY 70
+#define MAX_CSIPHY_CMN_REG_ARRAY 5
+
+#define MAX_LANES 5
+#define MAX_SETTINGS_PER_LANE 43
+
+#define MAX_REGULATOR 5
+#define CAMX_CSIPHY_DEV_NAME "cam-csiphy-driver"
+
+#define CSIPHY_POWER_UP 0
+#define CSIPHY_POWER_DOWN 1
+
+#define CSIPHY_DEFAULT_PARAMS 0
+#define CSIPHY_LANE_ENABLE 1
+#define CSIPHY_SETTLE_CNT_LOWER_BYTE 2
+#define CSIPHY_SETTLE_CNT_HIGHER_BYTE 3
+#define CSIPHY_DNP_PARAMS 4
+#define CSIPHY_2PH_REGS 5
+#define CSIPHY_3PH_REGS 6
+
+#define CSIPHY_MAX_INSTANCES 2
+
+#define CAM_CSIPHY_MAX_DPHY_LANES 4
+#define CAM_CSIPHY_MAX_CPHY_LANES 3
+
+#define ENABLE_IRQ false
+
+#undef CDBG
+#ifdef CAM_CSIPHY_CORE_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+enum cam_csiphy_state {
+ CAM_CSIPHY_INIT,
+ CAM_CSIPHY_ACQUIRE,
+ CAM_CSIPHY_START,
+};
+
+/**
+ * struct csiphy_reg_parms_t
+ * @mipi_csiphy_glbl_irq_cmd_addr: CSIPhy irq addr
+ * @mipi_csiphy_interrupt_status0_addr:
+ * CSIPhy interrupt status addr
+ * @mipi_csiphy_interrupt_mask0_addr:
+ * CSIPhy interrupt mask addr
+ * @mipi_csiphy_interrupt_mask_val:
+ * CSIPhy interrupt mask val
+ * @mipi_csiphy_interrupt_mask_addr:
+ * CSIPhy interrupt mask addr (second mask register)
+ * @mipi_csiphy_interrupt_clear0_addr:
+ * CSIPhy interrupt clear addr
+ * @csiphy_version: CSIPhy Version
+ * @csiphy_common_array_size: CSIPhy common array size
+ * @csiphy_reset_array_size: CSIPhy reset array size
+ * @csiphy_2ph_config_array_size: 2ph settings size
+ * @csiphy_3ph_config_array_size: 3ph settings size
+ * @csiphy_cpas_cp_bits_per_phy: CP bits per phy
+ * @csiphy_cpas_cp_is_interleaved: checks whether cp bits
+ * are interleaved or not
+ * @csiphy_cpas_cp_2ph_offset: cp register 2ph offset
+ * @csiphy_cpas_cp_3ph_offset: cp register 3ph offset
+ * @csiphy_2ph_clock_lane: clock lane in 2ph
+ * @csiphy_2ph_combo_ck_ln: clk lane in combo 2ph
+ */
+struct csiphy_reg_parms_t {
+/*MIPI CSI PHY registers*/
+ uint32_t mipi_csiphy_glbl_irq_cmd_addr;
+ uint32_t mipi_csiphy_interrupt_status0_addr;
+ uint32_t mipi_csiphy_interrupt_mask0_addr;
+ uint32_t mipi_csiphy_interrupt_mask_val;
+ uint32_t mipi_csiphy_interrupt_mask_addr;
+ uint32_t mipi_csiphy_interrupt_clear0_addr;
+ uint32_t csiphy_version;
+ uint32_t csiphy_common_array_size;
+ uint32_t csiphy_reset_array_size;
+ uint32_t csiphy_2ph_config_array_size;
+ uint32_t csiphy_3ph_config_array_size;
+ uint32_t csiphy_cpas_cp_bits_per_phy;
+ uint32_t csiphy_cpas_cp_is_interleaved;
+ uint32_t csiphy_cpas_cp_2ph_offset;
+ uint32_t csiphy_cpas_cp_3ph_offset;
+ uint32_t csiphy_2ph_clock_lane;
+ uint32_t csiphy_2ph_combo_ck_ln;
+};
+
+/**
+ * struct intf_params
+ * @device_hdl: Device Handle
+ * @session_hdl: Session Handle
+ * @ops: KMD operations
+ * @crm_cb: Callback API pointers
+ */
+struct intf_params {
+ int32_t device_hdl[CSIPHY_MAX_INSTANCES];
+ int32_t session_hdl[CSIPHY_MAX_INSTANCES];
+ int32_t link_hdl[CSIPHY_MAX_INSTANCES];
+ struct cam_req_mgr_kmd_ops ops;
+ struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct csiphy_reg_t
+ * @reg_addr: Register address
+ * @reg_data: Register data
+ * @delay: Delay
+ * @csiphy_param_type: CSIPhy parameter type
+ */
+struct csiphy_reg_t {
+ int32_t reg_addr;
+ int32_t reg_data;
+ int32_t delay;
+ uint32_t csiphy_param_type;
+};
+
+/**
+ * struct csiphy_ctrl_t
+ * @csiphy_reg: Register address
+ * @csiphy_common_reg: Common register set
+ * @csiphy_irq_reg: IRQ configuration register set
+ * @csiphy_reset_reg: Reset register set
+ * @csiphy_2ph_reg: 2phase register set
+ * @csiphy_2ph_combo_mode_reg:
+ * 2phase combo register set
+ * @csiphy_3ph_reg: 3phase register set
+ * @csiphy_2ph_3ph_mode_reg:
+ * 2 phase 3phase combo register set
+ */
+struct csiphy_ctrl_t {
+ struct csiphy_reg_parms_t csiphy_reg;
+ struct csiphy_reg_t *csiphy_common_reg;
+ struct csiphy_reg_t *csiphy_irq_reg;
+ struct csiphy_reg_t *csiphy_reset_reg;
+ struct csiphy_reg_t (*csiphy_2ph_reg)[MAX_SETTINGS_PER_LANE];
+ struct csiphy_reg_t (*csiphy_2ph_combo_mode_reg)[MAX_SETTINGS_PER_LANE];
+ struct csiphy_reg_t (*csiphy_3ph_reg)[MAX_SETTINGS_PER_LANE];
+ struct csiphy_reg_t (*csiphy_2ph_3ph_mode_reg)[MAX_SETTINGS_PER_LANE];
+};
+
+/**
+ * cam_csiphy_param: Provides cmdbuffer structre
+ * @lane_mask : Lane mask details
+ * @lane_assign : Lane sensor will be using
+ * @csiphy_3phase : Mentions DPHY or CPHY
+ * @combo_mode : Info regarding combo_mode is enable / disable
+ * @lane_cnt : Total number of lanes
+ * @secure_mode : Per-instance secure mode flag
+ * @settle_time : Settling time
+ * @settle_time_combo_sensor : Settling time for the combo sensor
+ * (NOTE(review): the core code divides this by 200000000, which
+ * suggests the unit is not ms as previously documented — confirm)
+ * @data_rate : Data rate in mbps
+ *
+ */
+struct cam_csiphy_param {
+ uint16_t lane_mask;
+ uint16_t lane_assign;
+ uint8_t csiphy_3phase;
+ uint8_t combo_mode;
+ uint8_t lane_cnt;
+ uint8_t secure_mode[CSIPHY_MAX_INSTANCES];
+ uint64_t settle_time;
+ uint64_t settle_time_combo_sensor;
+ uint64_t data_rate;
+};
+
+/**
+ * struct csiphy_device
+ * @mutex: Serializes all ioctl/close access to this instance
+ * @hw_version: Hardware Version
+ * @csiphy_state: CSIPhy state (INIT/ACQUIRE/START)
+ * @ctrl_reg: CSIPhy control register tables
+ * @csiphy_max_clk: Max timer clock rate
+ * @csiphy_3p_clk_info: 3Phase clock information
+ * @csiphy_3p_clk: 3Phase clocks structure
+ * @csi_3phase: Is it a 3Phase mode
+ * @ref_count: Reference count
+ * @lane_mask: Per-PHY lane mask
+ * @is_csiphy_3phase_hw: Whether the HW supports 3phase
+ * @num_irq_registers: Number of IRQ status/clear registers
+ * @v4l2_dev_str: cam_subdev wrapper for the v4l2 subdev
+ * @csiphy_info: Cached userspace PHY configuration
+ * @bridge_intf: CRM bridge handles (device/session/link)
+ * @clk_lane: Clock lane
+ * @acquire_count: Acquire device count
+ * @start_dev_count: Start count
+ * @device_name: Subdev name
+ * @is_acquired_dev_combo_mode:
+ * Flag that mentions whether already acquired
+ * device is for combo mode
+ * @soc_info: SOC information
+ * @cpas_handle: CPAS handle
+ * @config_count: Config reg count
+ * @csiphy_cpas_cp_reg_mask: CP reg mask for phy instance
+ */
+struct csiphy_device {
+ struct mutex mutex;
+ uint32_t hw_version;
+ enum cam_csiphy_state csiphy_state;
+ struct csiphy_ctrl_t *ctrl_reg;
+ uint32_t csiphy_max_clk;
+ struct msm_cam_clk_info csiphy_3p_clk_info[2];
+ struct clk *csiphy_3p_clk[2];
+ unsigned char csi_3phase;
+ int32_t ref_count;
+ uint16_t lane_mask[MAX_CSIPHY];
+ uint8_t is_csiphy_3phase_hw;
+ uint8_t num_irq_registers;
+ struct cam_subdev v4l2_dev_str;
+ struct cam_csiphy_param csiphy_info;
+ struct intf_params bridge_intf;
+ uint32_t clk_lane;
+ uint32_t acquire_count;
+ uint32_t start_dev_count;
+ char device_name[20];
+ uint32_t is_acquired_dev_combo_mode;
+ struct cam_hw_soc_info soc_info;
+ uint32_t cpas_handle;
+ uint32_t config_count;
+ uint64_t csiphy_cpas_cp_reg_mask[CSIPHY_MAX_INSTANCES];
+};
+
+#endif /* _CAM_CSIPHY_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
new file mode 100644
index 000000000000..0902601cebd5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -0,0 +1,284 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_csiphy_soc.h"
+#include "cam_csiphy_core.h"
+#include "include/cam_csiphy_1_1_hwreg.h"
+#include "include/cam_csiphy_1_0_hwreg.h"
+#include "include/cam_csiphy_1_2_hwreg.h"
+#include "include/cam_csiphy_2_0_hwreg.h"
+
+#define BYTES_PER_REGISTER 4
+#define NUM_REGISTER_PER_LINE 4
+#define REG_OFFSET(__start, __i) ((__start) + ((__i) * BYTES_PER_REGISTER))
+
+static int cam_io_phy_dump(void __iomem *base_addr,
+ uint32_t start_offset, int size)
+{
+ char line_str[128];
+ char *p_str;
+ int i;
+ uint32_t data;
+
+ CAM_INFO(CAM_CSIPHY, "addr=%pK offset=0x%x size=%d",
+ base_addr, start_offset, size);
+
+ if (!base_addr || (size <= 0))
+ return -EINVAL;
+
+ line_str[0] = '\0';
+ p_str = line_str;
+ for (i = 0; i < size; i++) {
+ if (i % NUM_REGISTER_PER_LINE == 0) {
+ snprintf(p_str, 12, "0x%08x: ",
+ REG_OFFSET(start_offset, i));
+ p_str += 11;
+ }
+ data = readl_relaxed(base_addr + REG_OFFSET(start_offset, i));
+ snprintf(p_str, 9, "%08x ", data);
+ p_str += 8;
+ if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
+ CAM_ERR(CAM_CSIPHY, "%s", line_str);
+ line_str[0] = '\0';
+ p_str = line_str;
+ }
+ }
+ if (line_str[0] != '\0')
+ CAM_ERR(CAM_CSIPHY, "%s", line_str);
+
+ return 0;
+}
+
+int32_t cam_csiphy_mem_dmp(struct cam_hw_soc_info *soc_info)
+{
+ int32_t rc = 0;
+ resource_size_t size = 0;
+ void __iomem *addr = NULL;
+
+ if (!soc_info) {
+ rc = -EINVAL;
+ CAM_ERR(CAM_CSIPHY, "invalid input %d", rc);
+ return rc;
+ }
+ addr = soc_info->reg_map[0].mem_base;
+ size = resource_size(soc_info->mem_block[0]);
+ rc = cam_io_phy_dump(addr, 0, (size >> 2));
+ if (rc < 0) {
+ CAM_ERR(CAM_CSIPHY, "generating dump failed %d", rc);
+ return rc;
+ }
+ return rc;
+}
+
+int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev)
+{
+ int32_t rc = 0;
+ struct cam_hw_soc_info *soc_info;
+
+ soc_info = &csiphy_dev->soc_info;
+
+ if (csiphy_dev->ref_count++) {
+ CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d",
+ csiphy_dev->ref_count);
+ return rc;
+ }
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_SVS_VOTE, ENABLE_IRQ);
+ if (rc < 0) {
+ CAM_ERR(CAM_CSIPHY, "failed to enable platform resources %d",
+ rc);
+ return rc;
+ }
+
+ rc = cam_soc_util_set_src_clk_rate(soc_info,
+ soc_info->clk_rate[0][soc_info->src_clk_idx]);
+
+ if (rc < 0) {
+ CAM_ERR(CAM_CSIPHY, "csiphy_clk_set_rate failed rc: %d", rc);
+ goto csiphy_disable_platform_resource;
+ }
+
+ cam_csiphy_reset(csiphy_dev);
+
+ return rc;
+
+
+csiphy_disable_platform_resource:
+ cam_soc_util_disable_platform_resource(soc_info, true, true);
+
+ return rc;
+}
+
+int32_t cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev)
+{
+ struct cam_hw_soc_info *soc_info;
+
+ if (!csiphy_dev || !csiphy_dev->ref_count) {
+ CAM_ERR(CAM_CSIPHY, "csiphy dev NULL / ref_count ZERO");
+ return 0;
+ }
+ soc_info = &csiphy_dev->soc_info;
+
+ if (--csiphy_dev->ref_count) {
+ CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d",
+ csiphy_dev->ref_count);
+ return 0;
+ }
+
+ cam_csiphy_reset(csiphy_dev);
+
+ cam_soc_util_disable_platform_resource(soc_info, true, true);
+
+ return 0;
+}
+
+int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev,
+ struct csiphy_device *csiphy_dev)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t clk_cnt = 0;
+ char *csi_3p_clk_name = "csi_phy_3p_clk";
+ char *csi_3p_clk_src_name = "csiphy_3p_clk_src";
+ struct cam_hw_soc_info *soc_info;
+
+ csiphy_dev->is_csiphy_3phase_hw = 0;
+ soc_info = &csiphy_dev->soc_info;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_CSIPHY, "parsing common soc dt(rc %d)", rc);
+ return rc;
+ }
+
+ csiphy_dev->is_csiphy_3phase_hw = 0;
+
+ if (of_device_is_compatible(soc_info->dev->of_node,
+ "qcom,csiphy-v1.0")) {
+ csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v1_0_reg;
+ csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg =
+ csiphy_2ph_v1_0_combo_mode_reg;
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_3ph_v1_0_reg;
+ csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg =
+ csiphy_3ph_v1_0_combo_mode_reg;
+ csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_1_0;
+ csiphy_dev->ctrl_reg->csiphy_common_reg = csiphy_common_reg_1_0;
+ csiphy_dev->ctrl_reg->csiphy_reset_reg = csiphy_reset_reg_1_0;
+ csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_0;
+ csiphy_dev->hw_version = CSIPHY_VERSION_V10;
+ csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
+ csiphy_dev->clk_lane = 0;
+ } else if (of_device_is_compatible(soc_info->dev->of_node,
+ "qcom,csiphy-v1.1")) {
+ csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v1_1_reg;
+ csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg =
+ csiphy_2ph_v1_1_combo_mode_reg;
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_3ph_v1_1_reg;
+ csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg =
+ csiphy_3ph_v1_1_combo_mode_reg;
+ csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_1_1;
+ csiphy_dev->ctrl_reg->csiphy_common_reg =
+ csiphy_common_reg_1_1;
+ csiphy_dev->ctrl_reg->csiphy_reset_reg =
+ csiphy_reset_reg_1_1;
+ csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_1;
+ csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
+ csiphy_dev->hw_version = CSIPHY_VERSION_V11;
+ csiphy_dev->clk_lane = 0;
+ } else if (of_device_is_compatible(soc_info->dev->of_node,
+ "qcom,csiphy-v1.2")) {
+ csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v1_2_reg;
+ csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg =
+ csiphy_2ph_v1_2_combo_mode_reg;
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_3ph_v1_2_reg;
+ csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg = NULL;
+ csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_1_2;
+ csiphy_dev->ctrl_reg->csiphy_common_reg =
+ csiphy_common_reg_1_2;
+ csiphy_dev->ctrl_reg->csiphy_reset_reg =
+ csiphy_reset_reg_1_2;
+ csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_2;
+ csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
+ csiphy_dev->hw_version = CSIPHY_VERSION_V12;
+ csiphy_dev->clk_lane = 0;
+ } else if (of_device_is_compatible(soc_info->dev->of_node,
+ "qcom,csiphy-v2.0")) {
+ csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v2_0_reg;
+ csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg =
+ csiphy_2ph_v2_0_combo_mode_reg;
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_3ph_v2_0_reg;
+ csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg = NULL;
+ csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_2_0;
+ csiphy_dev->ctrl_reg->csiphy_common_reg = csiphy_common_reg_2_0;
+ csiphy_dev->ctrl_reg->csiphy_reset_reg = csiphy_reset_reg_2_0;
+ csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v2_0;
+ csiphy_dev->hw_version = CSIPHY_VERSION_V20;
+ csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
+ csiphy_dev->clk_lane = 0;
+ } else {
+ CAM_ERR(CAM_CSIPHY, "invalid hw version : 0x%x",
+ csiphy_dev->hw_version);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ if (soc_info->num_clk > CSIPHY_NUM_CLK_MAX) {
+ CAM_ERR(CAM_CSIPHY, "invalid clk count=%d, max is %d",
+ soc_info->num_clk, CSIPHY_NUM_CLK_MAX);
+ return -EINVAL;
+ }
+ for (i = 0; i < soc_info->num_clk; i++) {
+ if (!strcmp(soc_info->clk_name[i],
+ csi_3p_clk_src_name)) {
+ csiphy_dev->csiphy_3p_clk_info[0].clk_name =
+ soc_info->clk_name[i];
+ csiphy_dev->csiphy_3p_clk_info[0].clk_rate =
+ soc_info->clk_rate[0][i];
+ csiphy_dev->csiphy_3p_clk[0] =
+ soc_info->clk[i];
+ continue;
+ } else if (!strcmp(soc_info->clk_name[i],
+ csi_3p_clk_name)) {
+ csiphy_dev->csiphy_3p_clk_info[1].clk_name =
+ soc_info->clk_name[i];
+ csiphy_dev->csiphy_3p_clk_info[1].clk_rate =
+ soc_info->clk_rate[0][i];
+ csiphy_dev->csiphy_3p_clk[1] =
+ soc_info->clk[i];
+ continue;
+ }
+
+ CAM_DBG(CAM_CSIPHY, "clk_rate[%d] = %d", clk_cnt,
+ soc_info->clk_rate[0][clk_cnt]);
+ clk_cnt++;
+ }
+
+ csiphy_dev->csiphy_max_clk =
+ soc_info->clk_rate[0][soc_info->src_clk_idx];
+
+ rc = cam_soc_util_request_platform_resource(&csiphy_dev->soc_info,
+ cam_csiphy_irq, csiphy_dev);
+
+ return rc;
+}
+
+int32_t cam_csiphy_soc_release(struct csiphy_device *csiphy_dev)
+{
+ if (!csiphy_dev) {
+ CAM_ERR(CAM_CSIPHY, "csiphy dev NULL");
+ return 0;
+ }
+
+ cam_soc_util_release_platform_resource(&csiphy_dev->soc_info);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
new file mode 100644
index 000000000000..68ca68ced31b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
#ifndef _CAM_CSIPHY_SOC_H_
#define _CAM_CSIPHY_SOC_H_

#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/irqreturn.h>
#include <linux/ion.h>
#include <linux/iommu.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include "cam_csiphy_dev.h"
#include "cam_csiphy_core.h"

#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)

/* Flag value stored in csiphy_device::is_csiphy_3phase_hw */
#define CSI_3PHASE_HW 1
/* Hardware versions selected from the DT compatible string */
#define CSIPHY_VERSION_V35 0x35
#define CSIPHY_VERSION_V10 0x10
#define CSIPHY_VERSION_V11 0x11
#define CSIPHY_VERSION_V12 0x12
#define CSIPHY_VERSION_V20 0x20

/*
 * NOTE(review): these prototypes use int while cam_csiphy_soc.c defines
 * the functions as int32_t; equivalent on current targets, but consider
 * aligning the types.
 */

/**
 * cam_csiphy_soc_release()
 * @csiphy_dev: CSIPhy device structure
 *
 * This API releases the SOC platform resources; returns 0.
 */
int cam_csiphy_soc_release(struct csiphy_device *csiphy_dev);

/**
 * cam_csiphy_parse_dt_info()
 * @pdev:       Platform device
 * @csiphy_dev: CSIPhy device structure
 *
 * This API parses csiphy device tree information and selects the
 * register tables for the detected hardware version.
 */
int cam_csiphy_parse_dt_info(struct platform_device *pdev,
	struct csiphy_device *csiphy_dev);

/**
 * cam_csiphy_enable_hw()
 * @csiphy_dev: CSIPhy device structure
 *
 * This API enables SOC resources (reference counted).
 */
int cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev);

/**
 * cam_csiphy_disable_hw()
 * @csiphy_dev: CSIPhy device structure
 *
 * This API disables SOC resources (reference counted).
 */
int cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev);

/**
 * cam_csiphy_mem_dmp()
 * @soc_info: Soc info of cam hw driver module
 *
 * This API dumps memory for the entire mapped region
 * (needs to be macro enabled before use)
 */
int cam_csiphy_mem_dmp(struct cam_hw_soc_info *soc_info);

#endif /* _CAM_CSIPHY_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
new file mode 100644
index 000000000000..324509340054
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
@@ -0,0 +1,355 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSIPHY_1_0_HWREG_H_
+#define _CAM_CSIPHY_1_0_HWREG_H_
+
+#include "../cam_csiphy_dev.h"
+
/* CSIPHY v1.0: top-level interrupt register addresses and table sizes */
struct csiphy_reg_parms_t csiphy_v1_0 = {
	.mipi_csiphy_interrupt_status0_addr = 0x8B0,
	.mipi_csiphy_interrupt_clear0_addr = 0x858,
	.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
	.csiphy_common_array_size = 5,
	.csiphy_reset_array_size = 5,
	.csiphy_2ph_config_array_size = 14,
	.csiphy_3ph_config_array_size = 19,
};
+
/*
 * Common init sequence for CSIPHY v1.0.
 * Entry layout assumed {offset, value, delay, op} — confirm against
 * struct csiphy_reg_t in cam_csiphy_dev.h.
 */
struct csiphy_reg_t csiphy_common_reg_1_0[] = {
	{0x0814, 0x00, 0x00, CSIPHY_LANE_ENABLE},
	{0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x081C, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0800, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS},
	{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};
+
/* Reset sequence for CSIPHY v1.0 (applied by cam_csiphy_reset) */
struct csiphy_reg_t csiphy_reset_reg_1_0[] = {
	{0x0814, 0x00, 0x05, CSIPHY_LANE_ENABLE},
	{0x0818, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x081C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0800, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS},
	{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
};
+
/* IRQ mask register settings for CSIPHY v1.0 (offsets 0x082c-0x0854) */
struct csiphy_reg_t csiphy_irq_reg_1_0[] = {
	{0x082c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0830, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0834, 0xFB, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0838, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x083c, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0840, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0844, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0848, 0xEF, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x084c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0850, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
	{0x0854, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
};
+
/*
 * Per-lane 2-phase (D-PHY) settings for CSIPHY v1.0.
 * Register banks in table order: 0x00xx, 0x07xx, 0x02xx, 0x04xx, 0x06xx
 * (lane-to-bank mapping is applied by the core driver).
 */
struct csiphy_reg_t csiphy_2ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
	{
		/* bank 0x00xx */
		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x07xx */
		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x02xx */
		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x04xx */
		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x06xx */
		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
};
+
/*
 * Per-lane 2-phase settings for CSIPHY v1.0 in combo mode.
 * Same bank order as csiphy_2ph_v1_0_reg; some banks carry fewer
 * entries than the non-combo table.
 */
struct csiphy_reg_t
	csiphy_2ph_v1_0_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
	{
		/* bank 0x00xx */
		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0028, 0x0A, 0x00, CSIPHY_DNP_PARAMS},
		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x07xx */
		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x02xx */
		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0228, 0x0A, 0x00, CSIPHY_DNP_PARAMS},
		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x04xx */
		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0428, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x06xx */
		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0600, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x060C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0638, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
};
+
/*
 * Per-lane 3-phase (C-PHY) settings for CSIPHY v1.0.
 * Only three banks are populated (0x01xx, 0x03xx, 0x05xx); remaining
 * MAX_LANES entries are zero-initialized.
 */
struct csiphy_reg_t csiphy_3ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
	{
		/* bank 0x01xx */
		{0x015C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0168, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x016C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0144, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0164, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x01DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x03xx */
		{0x035C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0368, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x036C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0344, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0364, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x03DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x05xx */
		{0x055C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0568, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x056C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0544, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0564, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x05DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
};
+
/*
 * Per-lane 3-phase settings for CSIPHY v1.0 in combo mode.
 * Entry values are identical to csiphy_3ph_v1_0_reg; kept as a separate
 * table so the 2ph_3ph combo pointer can be bound independently.
 */
struct csiphy_reg_t
	csiphy_3ph_v1_0_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
	{
		/* bank 0x01xx */
		{0x015C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0168, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x016C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0144, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0164, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x01DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x03xx */
		{0x035C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0368, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x036C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0344, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0364, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x03DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
	{
		/* bank 0x05xx */
		{0x055C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0568, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x056C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0544, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x0564, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
		{0x05DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
	},
};
+
+#endif /* _CAM_CSIPHY_1_0_HWREG_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h
new file mode 100644
index 000000000000..b1ceb0109155
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h
@@ -0,0 +1,506 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSIPHY_1_1_HWREG_H_
+#define _CAM_CSIPHY_1_1_HWREG_H_
+
+#include "../cam_csiphy_dev.h"
+
+struct csiphy_reg_parms_t csiphy_v1_1 = {
+ .mipi_csiphy_interrupt_status0_addr = 0x8B0,
+ .mipi_csiphy_interrupt_clear0_addr = 0x858,
+ .mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+ .csiphy_common_array_size = 5,
+ .csiphy_reset_array_size = 5,
+ .csiphy_2ph_config_array_size = 14,
+ .csiphy_3ph_config_array_size = 43,
+ .csiphy_2ph_clock_lane = 0x1,
+ .csiphy_2ph_combo_ck_ln = 0x10,
+};
+
+struct csiphy_reg_t csiphy_common_reg_1_1[] = {
+ {0x0814, 0xd5, 0x00, CSIPHY_LANE_ENABLE},
+ {0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x081C, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_reset_reg_1_1[] = {
+ {0x0814, 0x00, 0x05, CSIPHY_LANE_ENABLE},
+ {0x0818, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x081C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_irq_reg_1_1[] = {
+ {0x082c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0830, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0834, 0xFB, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0838, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x083c, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0840, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0844, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0848, 0xEF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x084c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0850, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0854, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct
+csiphy_reg_t csiphy_2ph_v1_1_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+ {
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+};
+
+struct csiphy_reg_t
+ csiphy_2ph_v1_1_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+ {
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+};
+
+struct
+csiphy_reg_t csiphy_3ph_v1_1_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+ {
+ {0x0144, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0990, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0994, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0998, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0990, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0994, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0998, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x098C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x098C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0168, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x016C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+ {0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0188, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x018C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0190, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0144, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0164, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x01DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x09B0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x09B0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0980, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x09B0, 0x24, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0984, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0988, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x09AC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x01DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0164, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0344, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A90, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A94, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A98, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A8C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A8C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0368, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x036C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+ {0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0388, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x038C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0390, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0344, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0364, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x03DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0AB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0AB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A80, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0AB0, 0x24, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0AAC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x03DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0364, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0544, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B90, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B94, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B98, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B8C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B8C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0568, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x056C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+ {0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0588, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x058C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0590, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0544, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0564, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x05DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0BB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0BB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B80, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0BB0, 0x24, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0BAC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x05DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0564, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+};
+
+struct csiphy_reg_t
+ csiphy_3ph_v1_1_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+ {
+ {0x0144, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0990, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0994, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0998, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0990, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0994, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0998, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x098C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x098C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0168, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x016C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+ {0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0188, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x018C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0190, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0144, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0164, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x01DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x09B0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x09B0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0980, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x09B0, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x015C, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0984, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0988, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x09AC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x01DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0164, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DNP_PARAMS},
+ },
+ {
+ {0x0344, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A90, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A94, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A98, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A8C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A8C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0368, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x036C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+ {0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0388, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x038C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0390, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0344, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0364, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x03DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0AB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0AB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A80, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0AB0, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x035C, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0AAC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x03DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0364, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DNP_PARAMS},
+ },
+ {
+ {0x0544, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B90, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B94, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B98, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B8C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B8C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0568, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x056C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+ {0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0588, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x058C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0590, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0544, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0564, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x05DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0BB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0BB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B80, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0BB0, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x055C, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0BAC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x05DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0564, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+};
+
+#endif /* _CAM_CSIPHY_1_1_HWREG_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h
new file mode 100644
index 000000000000..a16fb888edc6
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h
@@ -0,0 +1,399 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSIPHY_1_2_HWREG_H_
+#define _CAM_CSIPHY_1_2_HWREG_H_
+
+#include "../cam_csiphy_dev.h"
+
+struct csiphy_reg_parms_t csiphy_v1_2 = {
+ .mipi_csiphy_interrupt_status0_addr = 0x8B0,
+ .mipi_csiphy_interrupt_clear0_addr = 0x858,
+ .mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+ .csiphy_common_array_size = 4,
+ .csiphy_reset_array_size = 4,
+ .csiphy_2ph_config_array_size = 21,
+ .csiphy_3ph_config_array_size = 31,
+ .csiphy_2ph_clock_lane = 0x1,
+ .csiphy_2ph_combo_ck_ln = 0x10,
+};
+
+struct csiphy_reg_t csiphy_common_reg_1_2[] = {
+ {0x0814, 0xd5, 0x00, CSIPHY_LANE_ENABLE},
+ {0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x081C, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_reset_reg_1_2[] = {
+ {0x0814, 0x00, 0x05, CSIPHY_LANE_ENABLE},
+ {0x0818, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x081C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_irq_reg_1_2[] = {
+ {0x082c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0830, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0834, 0xFB, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0838, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x083c, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0840, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0844, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0848, 0xEF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x084c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0850, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0854, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct
+csiphy_reg_t csiphy_2ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+ {
+ {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0910, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0900, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0908, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x00C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C80, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C88, 0x14, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x07C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A00, 0x0B, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A08, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x02C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B00, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B08, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x04C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C00, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C08, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x06C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+};
+
+struct csiphy_reg_t
+ csiphy_2ph_v1_2_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+ {
+ {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0910, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0900, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0908, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x00C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C80, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C88, 0x14, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x07C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A00, 0x0B, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A08, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x02C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B00, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B08, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x04C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+ {
+ {0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C00, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C08, 0x14, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x06C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ },
+};
+
+/*
+ * Per-lane init sequence for the 3-phase (C-PHY) lanes on CSIPHY v1.2.
+ * One inner table per lane (register banks 0x1xx, 0x3xx, 0x5xx).  Each
+ * row appears to be {offset, value, delay, param type} - confirm against
+ * the csiphy_reg_t declaration.  Rows tagged CSIPHY_SETTLE_CNT_LOWER/
+ * HIGHER_BYTE are presumably patched with the sensor's settle count at
+ * config time (per the tag names) - verify in cam_csiphy_core.
+ * NOTE(review): vendor-provided hardware values; do not reorder rows.
+ */
+struct
+csiphy_reg_t csiphy_3ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0990, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0994, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0998, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0990, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0994, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0998, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x098C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0188, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x018C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0190, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09B0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0388, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x038C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0390, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0588, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x058C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0590, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+#endif /* _CAM_CSIPHY_1_2_HWREG_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_0_hwreg.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_0_hwreg.h
new file mode 100644
index 000000000000..8d7a5b58215a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_0_hwreg.h
@@ -0,0 +1,296 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSIPHY_2_0_HWREG_H_
+#define _CAM_CSIPHY_2_0_HWREG_H_
+
+#include "../cam_csiphy_dev.h"
+
+/*
+ * Top-level parameters for CSIPHY v2.0 programming: interrupt register
+ * offsets plus the row counts of the tables declared below.  The sizes
+ * must stay in sync with those tables (common=6, reset=3, 2ph=15 rows
+ * per lane, 3ph=17 rows per lane).  The two *_clock_lane/*_combo_ck_ln
+ * values are presumably lane-select bit masks for normal vs combo
+ * 2-phase mode (per the field names) - confirm in cam_csiphy_core.
+ */
+struct csiphy_reg_parms_t csiphy_v2_0 = {
+	.mipi_csiphy_interrupt_status0_addr = 0x8B0,
+	.mipi_csiphy_interrupt_clear0_addr = 0x858,
+	.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+	.csiphy_common_array_size = 6,
+	.csiphy_reset_array_size = 3,
+	.csiphy_2ph_config_array_size = 15,
+	.csiphy_3ph_config_array_size = 17,
+	.csiphy_2ph_clock_lane = 0x1,
+	.csiphy_2ph_combo_ck_ln = 0x10,
+};
+
+/*
+ * Non-per-lane bring-up writes.  The 4th field gates applicability:
+ * the lane-enable row, one 3-phase-only row, and three 2-phase-only
+ * rows.  Row count (6) must match csiphy_common_array_size above.
+ */
+struct csiphy_reg_t csiphy_common_reg_2_0[] = {
+	{0x0814, 0x00, 0x00, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x06, 0x00, CSIPHY_3PH_REGS},
+	{0x0164, 0x00, 0x00, CSIPHY_2PH_REGS},
+	{0x0364, 0x00, 0x00, CSIPHY_2PH_REGS},
+	{0x0564, 0x00, 0x00, CSIPHY_2PH_REGS},
+};
+
+/*
+ * Sequence that returns the PHY to its reset state (clears the same
+ * three registers the common table programs).  The 0x05 third field on
+ * the lane-enable row looks like a post-write delay - confirm against
+ * the csiphy_reg_t layout.  Row count (3) matches
+ * csiphy_reset_array_size above.
+ */
+struct csiphy_reg_t csiphy_reset_reg_2_0[] = {
+	{0x0814, 0x00, 0x05, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+/*
+ * Interrupt-mask programming for the contiguous 0x82c-0x854 register
+ * window (just below the clear0 offset 0x858 declared above).
+ * NOTE(review): the 0x01 third field on the 0x0830 row presumably is a
+ * delay; the non-0xff values (0xfb/0x7f/0xef) presumably mask specific
+ * IRQ bits - verify both against the hardware programming guide.
+ */
+struct csiphy_reg_t csiphy_irq_reg_2_0[] = {
+	{0x082c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0830, 0xff, 0x01, CSIPHY_DEFAULT_PARAMS},
+	{0x0834, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0838, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x083c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0840, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0844, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0848, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x084c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0850, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0854, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+/*
+ * Per-lane 2-phase (D-PHY) bring-up sequence for CSIPHY v2.0: one inner
+ * table per lane position (data lanes in banks 0x0xx/0x2xx/0x4xx/0x6xx,
+ * clock lane in 0x7xx - note its 0xC0 vs the data lanes' 0xD7 at offset
+ * 0x00 and its CSIPHY_DEFAULT_PARAMS where data lanes use DNP).  Rows
+ * tagged CSIPHY_SETTLE_CNT_LOWER_BYTE are presumably patched with the
+ * sensor settle count at config time; CSIPHY_DNP_PARAMS rows look like
+ * "do not program" placeholders - confirm both in cam_csiphy_core.
+ * Row count per lane (15) matches csiphy_2ph_config_array_size.
+ */
+struct csiphy_reg_t csiphy_2ph_v2_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0030, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0004, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x000C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0064, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0730, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0704, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0764, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0230, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0204, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x020C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0264, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0430, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0404, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x040C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0464, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0630, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0604, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0664, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+/*
+ * Combo-mode variant of csiphy_2ph_v2_0_reg (PHY shared between two
+ * sensors).  Differs from the normal table only in the 0x4xx and 0x6xx
+ * lanes: offsets 0x428/0x628 get explicit values (0x0A/0x0E) instead of
+ * DNP, and the 0x6xx lane is programmed like a clock lane (0xC0 at
+ * 0x600, no DNP rows) - consistent with csiphy_2ph_combo_ck_ln = 0x10
+ * declared above.  Keep in sync with the normal table when updating.
+ */
+struct csiphy_reg_t
+	csiphy_2ph_v2_0_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0030, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0004, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x000C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0064, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0730, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0704, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0764, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0230, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0204, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x020C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0264, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0430, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0404, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x040C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0464, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0630, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0604, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0664, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+/*
+ * Per-lane 3-phase (C-PHY) bring-up sequence for CSIPHY v2.0 (lane
+ * banks 0x1xx, 0x3xx, 0x5xx).  Rows tagged CSIPHY_SETTLE_CNT_LOWER/
+ * HIGHER_BYTE are presumably patched with the sensor settle count at
+ * config time - confirm in cam_csiphy_core.  Row count per lane (17)
+ * matches csiphy_3ph_config_array_size declared above.
+ */
+struct csiphy_reg_t csiphy_3ph_v2_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x015C, 0x23, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0x17, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x035C, 0x23, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0x17, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x055C, 0x23, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0x17, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+#endif /* _CAM_CSIPHY_2_0_HWREG_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/Makefile
new file mode 100644
index 000000000000..f0a05e3c9814
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/Makefile
@@ -0,0 +1,8 @@
+# Build rules for the camera EEPROM sub-module.
+# The -I paths pull headers from sibling camera_v3 sub-modules
+# (utils, CPAS, sensor IO/utils, request manager, CCI, SMMU).
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom_dev.o cam_eeprom_core.o cam_eeprom_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
new file mode 100644
index 000000000000..7d7c1a2977e5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -0,0 +1,946 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <media/cam_sensor.h>
+
+#include "cam_eeprom_core.h"
+#include "cam_eeprom_soc.h"
+#include "cam_debug_util.h"
+#include "cam_common_util.h"
+
+/**
+ * cam_eeprom_read_memory() - read map data into buffer
+ * @e_ctrl:	eeprom control struct
+ * @block:	block to be read
+ *
+ * This function iterates through blocks stored in block->map, reads each
+ * region and concatenate them into the pre-allocated block->mapdata.
+ * For each map entry the order is: optional slave-address switch,
+ * optional page-select write, optional page-enable write, optional
+ * readiness poll, the payload read, then page-enable cleared again.
+ * NOTE(review): assumes block->mapdata was sized to hold the sum of all
+ * mem.valid_size fields - allocated by the parser, confirm there.
+ *
+ * Returns 0 on success or the first failing I/O call's error code.
+ */
+static int cam_eeprom_read_memory(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_memory_block_t *block)
+{
+	int rc = 0;
+	int j;
+	struct cam_sensor_i2c_reg_setting i2c_reg_settings = {
+		NULL, 0, 0, 0, 0};
+	struct cam_sensor_i2c_reg_array i2c_reg_array;
+	struct cam_eeprom_memory_map_t *emap = block->map;
+	struct cam_eeprom_soc_private *eb_info;
+	/* write cursor into the output buffer; advanced after each read */
+	uint8_t *memptr = block->mapdata;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "e_ctrl is NULL");
+		return -EINVAL;
+	}
+
+	eb_info = (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+	for (j = 0; j < block->num_map; j++) {
+		CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X", emap[j].saddr);
+		/* a non-zero saddr overrides the current I2C slave address */
+		if (emap[j].saddr) {
+			eb_info->i2c_info.slave_addr = emap[j].saddr;
+			rc = cam_eeprom_update_i2c_info(e_ctrl,
+				&eb_info->i2c_info);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"failed: to update i2c info rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		/* optional page-select write before reading this region */
+		if (emap[j].page.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].page.addr_type;
+			i2c_reg_settings.data_type = emap[j].page.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].page.addr;
+			i2c_reg_array.reg_data = emap[j].page.data;
+			i2c_reg_array.delay = emap[j].page.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "page write failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		/* optional page-enable write (undone after the read below) */
+		if (emap[j].pageen.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].pageen.addr_type;
+			i2c_reg_settings.data_type = emap[j].pageen.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].pageen.addr;
+			i2c_reg_array.reg_data = emap[j].pageen.data;
+			i2c_reg_array.delay = emap[j].pageen.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "page enable failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		/* optionally poll until the device reports ready */
+		if (emap[j].poll.valid_size) {
+			rc = camera_io_dev_poll(&e_ctrl->io_master_info,
+				emap[j].poll.addr, emap[j].poll.data,
+				0, emap[j].poll.addr_type,
+				emap[j].poll.data_type,
+				emap[j].poll.delay);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "poll failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		/* the actual payload read, appended at memptr */
+		if (emap[j].mem.valid_size) {
+			rc = camera_io_dev_read_seq(&e_ctrl->io_master_info,
+				emap[j].mem.addr, memptr,
+				emap[j].mem.addr_type,
+				emap[j].mem.data_type,
+				emap[j].mem.valid_size);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "read failed rc %d",
+					rc);
+				return rc;
+			}
+			memptr += emap[j].mem.valid_size;
+		}
+
+		/* clear the page-enable register (write 0) after the read */
+		if (emap[j].pageen.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].pageen.addr_type;
+			i2c_reg_settings.data_type = emap[j].pageen.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].pageen.addr;
+			i2c_reg_array.reg_data = 0;
+			i2c_reg_array.delay = emap[j].pageen.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"page disable failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_power_up - Power up eeprom hardware
+ * @e_ctrl:	ctrl structure
+ * @power_info:	power up/down info for eeprom
+ *
+ * Fills the regulator tables for both the power-up and power-down
+ * sequences, runs the power-up sequence, and for CCI devices
+ * initialises the CCI master.  If CCI init fails after the rails were
+ * enabled, the power-up is rolled back so no regulator/clock is leaked.
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_power_up(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int32_t rc = 0;
+	struct cam_hw_soc_info *soc_info =
+		&e_ctrl->soc_info;
+
+	/* Parse and fill vreg params for power up settings */
+	rc = msm_camera_fill_vreg_params(
+		&e_ctrl->soc_info,
+		power_info->power_setting,
+		power_info->power_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"failed to fill power up vreg params rc:%d", rc);
+		return rc;
+	}
+
+	/* Parse and fill vreg params for power down settings*/
+	rc = msm_camera_fill_vreg_params(
+		&e_ctrl->soc_info,
+		power_info->power_down_setting,
+		power_info->power_down_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"failed to fill power down vreg params rc:%d", rc);
+		return rc;
+	}
+
+	power_info->dev = soc_info->dev;
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed in eeprom power up rc %d", rc);
+		return rc;
+	}
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+		rc = camera_io_init(&(e_ctrl->io_master_info));
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "cci_init failed");
+			/*
+			 * The rails are already on at this point; undo the
+			 * power up so a CCI init failure does not leak
+			 * enabled regulators/clocks.
+			 */
+			cam_sensor_util_power_down(power_info, soc_info);
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_power_down - Power down eeprom hardware
+ * @e_ctrl:	ctrl structure
+ *
+ * Runs the power-down sequence and, for CCI devices, releases the CCI
+ * master.
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_power_down(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_eeprom_soc_private *soc_private;
+	int rc = 0;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "failed: e_ctrl %pK", e_ctrl);
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	/*
+	 * soc_private is the pointer that can actually be NULL here;
+	 * &soc_private->power_info can never be NULL, so validate the
+	 * container before taking the member's address (the previous
+	 * "!power_info" check was dead code).
+	 */
+	if (!soc_private) {
+		CAM_ERR(CAM_EEPROM, "failed: soc_private %pK", soc_private);
+		return -EINVAL;
+	}
+	power_info = &soc_private->power_info;
+	soc_info = &e_ctrl->soc_info;
+
+	rc = cam_sensor_util_power_down(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "power down the core is failed:%d", rc);
+		return rc;
+	}
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER)
+		camera_io_release(&(e_ctrl->io_master_info));
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_match_id - match eeprom id
+ * @e_ctrl:	ctrl structure
+ *
+ * Reads two ID bytes over SPI and compares them with the manufacturer
+ * and device IDs stored in the SPI client; used to verify the expected
+ * SPI EEPROM part is present before reading its memory map.
+ *
+ * Returns 0 on match, -ENODEV on mismatch, or the query's error code.
+ */
+static int cam_eeprom_match_id(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int rc;
+	struct camera_io_master *client = &e_ctrl->io_master_info;
+	/* id[0] is checked against mfr_id0, id[1] against device_id0 */
+	uint8_t id[2];
+
+	rc = cam_spi_query_id(client, 0, CAMERA_SENSOR_I2C_TYPE_WORD,
+		&id[0], 2);
+	if (rc)
+		return rc;
+	CAM_DBG(CAM_EEPROM, "read 0x%x 0x%x, check 0x%x 0x%x",
+		id[0], id[1], client->spi_client->mfr_id0,
+		client->spi_client->device_id0);
+	if (id[0] != client->spi_client->mfr_id0
+		|| id[1] != client->spi_client->device_id0)
+		return -ENODEV;
+	return 0;
+}
+
+/**
+ * cam_eeprom_parse_read_memory_map - Parse memory map
+ * @of_node:	device node
+ * @e_ctrl:	ctrl structure
+ *
+ * Parses the eeprom memory map from DT, powers the device up, reads
+ * the full map into e_ctrl->cal_data, then powers back down.  On any
+ * failure the map/mapdata allocations made by the DT parser are freed
+ * and the counters reset; on success cal_data is retained for the
+ * caller.  Either way the state ends at CAM_EEPROM_ACQUIRE.
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
+	struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int32_t rc = 0;
+	struct cam_eeprom_soc_private *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "failed: e_ctrl is NULL");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	/* allocates cal_data.map/mapdata; freed below on any failure */
+	rc = cam_eeprom_parse_dt_memory_map(of_node, &e_ctrl->cal_data);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: eeprom dt parse rc %d", rc);
+		return rc;
+	}
+	rc = cam_eeprom_power_up(e_ctrl, power_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: eeprom power up rc %d", rc);
+		goto data_mem_free;
+	}
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_CONFIG;
+	/* SPI parts are probed first; a mismatch is not an error worth
+	 * logging loudly (CAM_DBG), but still aborts the read.
+	 */
+	if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE) {
+		rc = cam_eeprom_match_id(e_ctrl);
+		if (rc) {
+			CAM_DBG(CAM_EEPROM, "eeprom not matching %d", rc);
+			goto power_down;
+		}
+	}
+	rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "read_eeprom_memory failed");
+		goto power_down;
+	}
+
+	rc = cam_eeprom_power_down(e_ctrl);
+	if (rc)
+		CAM_ERR(CAM_EEPROM, "failed: eeprom power down rc %d", rc);
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+	return rc;
+power_down:
+	cam_eeprom_power_down(e_ctrl);
+data_mem_free:
+	vfree(e_ctrl->cal_data.mapdata);
+	vfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+	return rc;
+}
+
+/**
+ * cam_eeprom_get_dev_handle - get device handle
+ * @e_ctrl:	ctrl structure
+ * @arg:	Camera control command argument
+ *
+ * Creates a CRM bridge device handle for this eeprom, stores it in the
+ * bridge interface, and copies the handle back to userspace.  Fails if
+ * a handle was already acquired (device_hdl != -1).
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_get_dev_handle(struct cam_eeprom_ctrl_t *e_ctrl,
+	void *arg)
+{
+	struct cam_sensor_acquire_dev eeprom_acq_dev;
+	struct cam_create_dev_hdl bridge_params;
+	struct cam_control *cmd = (struct cam_control *)arg;
+
+	if (e_ctrl->bridge_intf.device_hdl != -1) {
+		CAM_ERR(CAM_EEPROM, "Device is already acquired");
+		return -EFAULT;
+	}
+	if (copy_from_user(&eeprom_acq_dev,
+		u64_to_user_ptr(cmd->handle),
+		sizeof(eeprom_acq_dev))) {
+		CAM_ERR(CAM_EEPROM,
+			"EEPROM:ACQUIRE_DEV: copy from user failed");
+		return -EFAULT;
+	}
+
+	bridge_params.session_hdl = eeprom_acq_dev.session_handle;
+	bridge_params.ops = &e_ctrl->bridge_intf.ops;
+	bridge_params.v4l2_sub_dev_flag = 0;
+	bridge_params.media_entity_flag = 0;
+	bridge_params.priv = e_ctrl;
+
+	/*
+	 * NOTE(review): the return of cam_create_device_hdl() is stored
+	 * without checking for an error value before it is copied back to
+	 * userspace - confirm whether a failure check is needed here.
+	 */
+	eeprom_acq_dev.device_handle =
+		cam_create_device_hdl(&bridge_params);
+	e_ctrl->bridge_intf.device_hdl = eeprom_acq_dev.device_handle;
+	e_ctrl->bridge_intf.session_hdl = eeprom_acq_dev.session_handle;
+
+	CAM_DBG(CAM_EEPROM, "Device Handle: %d", eeprom_acq_dev.device_handle);
+	if (copy_to_user(u64_to_user_ptr(cmd->handle),
+		&eeprom_acq_dev, sizeof(struct cam_sensor_acquire_dev))) {
+		CAM_ERR(CAM_EEPROM, "EEPROM:ACQUIRE_DEV: copy to user failed");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * cam_eeprom_update_slaveInfo - Update slave info
+ * @e_ctrl:	ctrl structure
+ * @cmd_buf:	command buffer
+ *
+ * Copies the slave address and I2C frequency mode from the userspace
+ * command buffer into the soc-private i2c_info and pushes them to the
+ * I/O layer via cam_eeprom_update_i2c_info().
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_update_slaveInfo(struct cam_eeprom_ctrl_t *e_ctrl,
+	void *cmd_buf)
+{
+	int32_t rc = 0;
+	struct cam_eeprom_soc_private *soc_private;
+	struct cam_cmd_i2c_info *cmd_i2c_info = NULL;
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	cmd_i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+	soc_private->i2c_info.slave_addr = cmd_i2c_info->slave_addr;
+	soc_private->i2c_info.i2c_freq_mode = cmd_i2c_info->i2c_freq_mode;
+
+	rc = cam_eeprom_update_i2c_info(e_ctrl,
+		&soc_private->i2c_info);
+	CAM_DBG(CAM_EEPROM, "Slave addr: 0x%x Freq Mode: %d",
+		soc_private->i2c_info.slave_addr,
+		soc_private->i2c_info.i2c_freq_mode);
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_parse_memory_map - Parse memory map info
+ * @data:             memory block data (map table + num_data updated here)
+ * @cmd_buf:          command buffer positioned at one command header
+ * @cmd_length:       command buffer length
+ * @num_map:          memory map size (index of the current map entry;
+ *                    may be advanced for multi-entry random writes)
+ * @cmd_length_bytes: command length processed in this function
+ *
+ * Decodes exactly one command (random write, continuous read, or
+ * wait) into the map table and reports the number of bytes consumed
+ * so the caller can advance its command-buffer cursor.
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_parse_memory_map(
+	struct cam_eeprom_memory_block_t *data,
+	void *cmd_buf, int cmd_length, uint16_t *cmd_length_bytes,
+	int *num_map)
+{
+	int32_t rc = 0;
+	int32_t cnt = 0;
+	int32_t processed_size = 0;
+	uint8_t generic_op_code;
+	struct cam_eeprom_memory_map_t *map = data->map;
+	struct common_header *cmm_hdr =
+		(struct common_header *)cmd_buf;
+	uint16_t cmd_length_in_bytes = 0;
+	struct cam_cmd_i2c_random_wr *i2c_random_wr = NULL;
+	struct cam_cmd_i2c_continuous_rd *i2c_cont_rd = NULL;
+	struct cam_cmd_conditional_wait *i2c_poll = NULL;
+	struct cam_cmd_unconditional_wait *i2c_uncond_wait = NULL;
+
+	generic_op_code = cmm_hdr->third_byte;
+	switch (cmm_hdr->cmd_type) {
+	case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
+		i2c_random_wr = (struct cam_cmd_i2c_random_wr *)cmd_buf;
+		/* payload is a flexible array: header counts one entry */
+		cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_random_wr) +
+			((i2c_random_wr->header.count - 1) *
+			sizeof(struct i2c_random_wr_payload));
+
+		/*
+		 * NOTE(review): entries are written at map[*num_map ..
+		 * *num_map + count - 1] with no bounds check against the
+		 * allocated table size — confirm header.count is validated
+		 * upstream before trusting it here.
+		 */
+		for (cnt = 0; cnt < (i2c_random_wr->header.count);
+			cnt++) {
+			map[*num_map + cnt].page.addr =
+				i2c_random_wr->random_wr_payload[cnt].reg_addr;
+			map[*num_map + cnt].page.addr_type =
+				i2c_random_wr->header.addr_type;
+			map[*num_map + cnt].page.data =
+				i2c_random_wr->random_wr_payload[cnt].reg_data;
+			map[*num_map + cnt].page.data_type =
+				i2c_random_wr->header.data_type;
+			map[*num_map + cnt].page.valid_size = 1;
+		}
+
+		/*
+		 * Leave *num_map pointing at the last entry written; the
+		 * caller increments it before the next map command.
+		 */
+		*num_map += (i2c_random_wr->header.count - 1);
+		cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+		processed_size +=
+			cmd_length_in_bytes;
+		break;
+	case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
+		i2c_cont_rd = (struct cam_cmd_i2c_continuous_rd *)cmd_buf;
+		cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_continuous_rd);
+
+		map[*num_map].mem.addr = i2c_cont_rd->reg_addr;
+		map[*num_map].mem.addr_type = i2c_cont_rd->header.addr_type;
+		map[*num_map].mem.data_type = i2c_cont_rd->header.data_type;
+		map[*num_map].mem.valid_size =
+			i2c_cont_rd->header.count;
+		cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+		processed_size +=
+			cmd_length_in_bytes;
+		/* total bytes the eeprom read will produce */
+		data->num_data += map[*num_map].mem.valid_size;
+		break;
+	case CAMERA_SENSOR_CMD_TYPE_WAIT:
+		if (generic_op_code ==
+			CAMERA_SENSOR_WAIT_OP_HW_UCND ||
+			generic_op_code ==
+			CAMERA_SENSOR_WAIT_OP_SW_UCND) {
+			i2c_uncond_wait =
+				(struct cam_cmd_unconditional_wait *)cmd_buf;
+			cmd_length_in_bytes =
+				sizeof(struct cam_cmd_unconditional_wait);
+
+			/* a delay must attach to a preceding map entry */
+			if (*num_map < 1) {
+				CAM_ERR(CAM_EEPROM,
+					"invalid map number, num_map=%d",
+					*num_map);
+				return -EINVAL;
+			}
+
+			/*
+			 * Though delay is added all of them, but delay will
+			 * be applicable to only one of them as only one of
+			 * them will have valid_size set to >= 1.
+			 */
+			map[*num_map - 1].mem.delay = i2c_uncond_wait->delay;
+			map[*num_map - 1].page.delay = i2c_uncond_wait->delay;
+			map[*num_map - 1].pageen.delay = i2c_uncond_wait->delay;
+		} else if (generic_op_code ==
+			CAMERA_SENSOR_WAIT_OP_COND) {
+			i2c_poll = (struct cam_cmd_conditional_wait *)cmd_buf;
+			cmd_length_in_bytes =
+				sizeof(struct cam_cmd_conditional_wait);
+
+			map[*num_map].poll.addr = i2c_poll->reg_addr;
+			map[*num_map].poll.addr_type = i2c_poll->addr_type;
+			map[*num_map].poll.data = i2c_poll->reg_data;
+			map[*num_map].poll.data_type = i2c_poll->data_type;
+			map[*num_map].poll.delay = i2c_poll->timeout;
+			map[*num_map].poll.valid_size = 1;
+		}
+		cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+		processed_size +=
+			cmd_length_in_bytes;
+		break;
+	default:
+		/*
+		 * NOTE(review): unknown commands consume 0 bytes, so the
+		 * caller's while-loop cursor does not advance — confirm the
+		 * caller guarantees forward progress for unexpected types.
+		 */
+		break;
+	}
+
+	*cmd_length_bytes = processed_size;
+	return rc;
+}
+
+/**
+ * cam_eeprom_init_pkt_parser - Parse eeprom packet
+ * @e_ctrl:     ctrl structure
+ * @csl_packet: csl packet received
+ *
+ * Walks every command buffer in the packet and dispatches each
+ * embedded command: slave info, power up/down settings, and memory
+ * map entries (random write / continuous read / wait). Allocates the
+ * worst-case map table into e_ctrl->cal_data.map.
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_init_pkt_parser(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_packet *csl_packet)
+{
+	int32_t rc = 0;
+	int i = 0;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	uint32_t *offset = NULL;
+	uint32_t *cmd_buf = NULL;
+	uintptr_t generic_pkt_addr;
+	size_t pkt_len = 0;
+	uint32_t total_cmd_buf_in_bytes = 0;
+	uint32_t processed_cmd_buf_in_bytes = 0;
+	struct common_header *cmm_hdr = NULL;
+	uint16_t cmd_length_in_bytes = 0;
+	struct cam_cmd_i2c_info *i2c_info = NULL;
+	/* -1 so the first map command (num_map++) lands at index 0 */
+	int num_map = -1;
+	struct cam_eeprom_memory_map_t *map = NULL;
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+	/* worst-case table; freed by the caller after the read completes */
+	e_ctrl->cal_data.map = vzalloc((MSM_EEPROM_MEMORY_MAP_MAX_SIZE *
+		MSM_EEPROM_MAX_MEM_MAP_CNT) *
+		(sizeof(struct cam_eeprom_memory_map_t)));
+	if (!e_ctrl->cal_data.map) {
+		rc = -ENOMEM;
+		CAM_ERR(CAM_EEPROM, "failed");
+		return rc;
+	}
+	map = e_ctrl->cal_data.map;
+
+	offset = (uint32_t *)&csl_packet->payload;
+	offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+	cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+	/*
+	 * NOTE(review): the early error returns below leave cal_data.map
+	 * allocated and the cpu buffers from cam_mem_get_cpu_buf mapped —
+	 * confirm the caller cleans these up on failure.
+	 */
+	/* Loop through multiple command buffers */
+	for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+		total_cmd_buf_in_bytes = cmd_desc[i].length;
+		processed_cmd_buf_in_bytes = 0;
+		if (!total_cmd_buf_in_bytes)
+			continue;
+		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+			&generic_pkt_addr, &pkt_len);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed to get cpu buf");
+			return rc;
+		}
+		cmd_buf = (uint32_t *)generic_pkt_addr;
+		if (!cmd_buf) {
+			CAM_ERR(CAM_EEPROM, "invalid cmd buf");
+			return -EINVAL;
+		}
+		cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+		/* Loop through multiple cmd formats in one cmd buffer */
+		while (processed_cmd_buf_in_bytes < total_cmd_buf_in_bytes) {
+			cmm_hdr = (struct common_header *)cmd_buf;
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+				/* Configure the following map slave address */
+				map[num_map + 1].saddr = i2c_info->slave_addr;
+				rc = cam_eeprom_update_slaveInfo(e_ctrl,
+					cmd_buf);
+				cmd_length_in_bytes =
+					sizeof(struct cam_cmd_i2c_info);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+					sizeof(uint32_t);
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+				/* power settings consume the whole buffer */
+				cmd_length_in_bytes = total_cmd_buf_in_bytes;
+				rc = cam_sensor_update_power_settings(cmd_buf,
+					cmd_length_in_bytes, power_info);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+					sizeof(uint32_t);
+				if (rc) {
+					CAM_ERR(CAM_EEPROM, "Failed");
+					return rc;
+				}
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
+			case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
+			case CAMERA_SENSOR_CMD_TYPE_WAIT:
+				num_map++;
+				rc = cam_eeprom_parse_memory_map(
+					&e_ctrl->cal_data, cmd_buf,
+					total_cmd_buf_in_bytes,
+					&cmd_length_in_bytes, &num_map);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/sizeof(uint32_t);
+				break;
+			default:
+				break;
+			}
+		}
+		e_ctrl->cal_data.num_map = num_map + 1;
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_get_cal_data - parse the userspace IO config and
+ *                           copy read data to share with userspace
+ * @e_ctrl:     ctrl structure
+ * @csl_packet: csl packet received
+ *
+ * Walks the packet's IO configs and, for each CAM_BUF_OUTPUT entry,
+ * copies cal_data.mapdata into the user-provided output buffer.
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_packet *csl_packet)
+{
+	struct cam_buf_io_cfg *io_cfg;
+	uint32_t i = 0;
+	int rc = 0;
+	uintptr_t buf_addr;
+	size_t buf_size;
+	uint8_t *read_buffer;
+
+	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+		&csl_packet->payload +
+		csl_packet->io_configs_offset);
+
+	CAM_DBG(CAM_EEPROM, "number of IO configs: %d:",
+		csl_packet->num_io_configs);
+
+	for (i = 0; i < csl_packet->num_io_configs; i++) {
+		CAM_DBG(CAM_EEPROM, "Direction: %d:", io_cfg->direction);
+		if (io_cfg->direction == CAM_BUF_OUTPUT) {
+			rc = cam_mem_get_cpu_buf(io_cfg->mem_handle[0],
+				&buf_addr, &buf_size);
+			/*
+			 * Fix: the return code was ignored, so buf_addr and
+			 * buf_size could be used uninitialized when the
+			 * handle lookup failed.
+			 */
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "Failed to get cpu buf");
+				return rc;
+			}
+			CAM_DBG(CAM_EEPROM, "buf_addr : %pK, buf_size : %zu\n",
+				(void *)buf_addr, buf_size);
+
+			read_buffer = (uint8_t *)buf_addr;
+			if (!read_buffer) {
+				CAM_ERR(CAM_EEPROM,
+					"invalid buffer to copy data");
+				return -EINVAL;
+			}
+			read_buffer += io_cfg->offsets[0];
+
+			if (buf_size < e_ctrl->cal_data.num_data) {
+				CAM_ERR(CAM_EEPROM,
+					"failed to copy, Invalid size");
+				return -EINVAL;
+			}
+
+			CAM_DBG(CAM_EEPROM, "copy the data, len:%d",
+				e_ctrl->cal_data.num_data);
+			memcpy(read_buffer, e_ctrl->cal_data.mapdata,
+				e_ctrl->cal_data.num_data);
+
+		} else {
+			CAM_ERR(CAM_EEPROM, "Invalid direction");
+			rc = -EINVAL;
+		}
+		/*
+		 * NOTE(review): io_cfg is never advanced, so every
+		 * iteration looks at io_cfg[0] — confirm whether more than
+		 * one IO config is ever expected here.
+		 */
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_pkt_parse - Parse csl packet
+ * @e_ctrl: ctrl structure
+ * @arg:    Camera control command argument (struct cam_control *)
+ *
+ * Copies the config command from userspace, maps the csl packet and
+ * handles the INIT opcode: either serves data already described by
+ * the device tree (kernel probe) or parses the packet, powers up the
+ * device, reads the eeprom, copies the data to userspace and tears
+ * everything back down.
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
+{
+	int32_t rc = 0;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_config_dev_cmd dev_config;
+	uintptr_t generic_pkt_addr;
+	size_t pkt_len;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+	ioctl_ctrl = (struct cam_control *)arg;
+
+	if (copy_from_user(&dev_config,
+		u64_to_user_ptr(ioctl_ctrl->handle),
+		sizeof(dev_config)))
+		return -EFAULT;
+	rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
+		&generic_pkt_addr, &pkt_len);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"error in converting command Handle Error: %d", rc);
+		return rc;
+	}
+
+	/*
+	 * NOTE(review): "> pkt_len" still admits offset == pkt_len, which
+	 * would place csl_packet one past the mapped buffer — confirm
+	 * whether ">=" (or a size-aware check) is intended.
+	 */
+	if (dev_config.offset > pkt_len) {
+		CAM_ERR(CAM_EEPROM,
+			"Offset is out of bound: off: %lld, %zu",
+			dev_config.offset, pkt_len);
+		return -EINVAL;
+	}
+
+	csl_packet = (struct cam_packet *)
+		(generic_pkt_addr + (uint32_t)dev_config.offset);
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_EEPROM_PACKET_OPCODE_INIT:
+		if (e_ctrl->userspace_probe == false) {
+			/* kernel probe: map comes from the device tree */
+			rc = cam_eeprom_parse_read_memory_map(
+				e_ctrl->soc_info.dev->of_node, e_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_EEPROM, "Failed: rc : %d", rc);
+				return rc;
+			}
+			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+			/* data handed to userspace; drop the caches */
+			vfree(e_ctrl->cal_data.mapdata);
+			vfree(e_ctrl->cal_data.map);
+			e_ctrl->cal_data.num_data = 0;
+			e_ctrl->cal_data.num_map = 0;
+			CAM_DBG(CAM_EEPROM,
+				"Returning the data using kernel probe");
+			break;
+		}
+		/* userspace probe: map is described by the packet itself */
+		rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed in parsing the pkt");
+			return rc;
+		}
+
+		e_ctrl->cal_data.mapdata =
+			vzalloc(e_ctrl->cal_data.num_data);
+		if (!e_ctrl->cal_data.mapdata) {
+			rc = -ENOMEM;
+			CAM_ERR(CAM_EEPROM, "failed");
+			goto error;
+		}
+
+		rc = cam_eeprom_power_up(e_ctrl,
+			&soc_private->power_info);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+			goto memdata_free;
+		}
+
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_CONFIG;
+		rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM,
+				"read_eeprom_memory failed");
+			goto power_down;
+		}
+
+		rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+		rc = cam_eeprom_power_down(e_ctrl);
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+		/* one-shot read done: release caches and power settings */
+		vfree(e_ctrl->cal_data.mapdata);
+		vfree(e_ctrl->cal_data.map);
+		kfree(power_info->power_setting);
+		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
+		power_info->power_setting_size = 0;
+		power_info->power_down_setting_size = 0;
+		e_ctrl->cal_data.num_data = 0;
+		e_ctrl->cal_data.num_map = 0;
+		break;
+	default:
+		break;
+	}
+	return rc;
+power_down:
+	cam_eeprom_power_down(e_ctrl);
+memdata_free:
+	vfree(e_ctrl->cal_data.mapdata);
+error:
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	vfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+	return rc;
+}
+
+/**
+ * cam_eeprom_shutdown - Release eeprom resources on close/shutdown
+ * @e_ctrl: ctrl structure
+ *
+ * Steps the device back to CAM_EEPROM_INIT: powers it down if still
+ * in CONFIG state, then destroys the bridge device handle and frees
+ * the cached power settings if in ACQUIRE state.
+ */
+void cam_eeprom_shutdown(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int rc;
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+	if (e_ctrl->cam_eeprom_state == CAM_EEPROM_INIT)
+		return;
+
+	/* CONFIG falls through into ACQUIRE handling after power down */
+	if (e_ctrl->cam_eeprom_state == CAM_EEPROM_CONFIG) {
+		rc = cam_eeprom_power_down(e_ctrl);
+		if (rc < 0)
+			CAM_ERR(CAM_EEPROM, "EEPROM Power down failed");
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+	}
+
+	if (e_ctrl->cam_eeprom_state == CAM_EEPROM_ACQUIRE) {
+		rc = cam_destroy_device_hdl(e_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_EEPROM, "destroying the device hdl");
+
+		e_ctrl->bridge_intf.device_hdl = -1;
+		e_ctrl->bridge_intf.link_hdl = -1;
+		e_ctrl->bridge_intf.session_hdl = -1;
+
+		kfree(power_info->power_setting);
+		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
+		power_info->power_setting_size = 0;
+		power_info->power_down_setting_size = 0;
+	}
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+}
+
+/**
+ * cam_eeprom_driver_cmd - Handle eeprom cmds
+ * @e_ctrl: ctrl structure
+ * @arg:    Camera control command argument (struct cam_control *)
+ *
+ * Dispatches CAM_QUERY_CAP / CAM_ACQUIRE_DEV / CAM_RELEASE_DEV /
+ * CAM_CONFIG_DEV under the eeprom mutex.
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
+{
+	int rc = 0;
+	struct cam_eeprom_query_cap_t eeprom_cap = {0};
+	struct cam_control *cmd = (struct cam_control *)arg;
+
+	if (!e_ctrl || !cmd) {
+		CAM_ERR(CAM_EEPROM, "Invalid Arguments");
+		return -EINVAL;
+	}
+
+	if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+		CAM_ERR(CAM_EEPROM, "Invalid handle type: %d",
+			cmd->handle_type);
+		return -EINVAL;
+	}
+
+	mutex_lock(&(e_ctrl->eeprom_mutex));
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP:
+		eeprom_cap.slot_info = e_ctrl->soc_info.index;
+		if (e_ctrl->userspace_probe == false)
+			eeprom_cap.eeprom_kernel_probe = true;
+		else
+			eeprom_cap.eeprom_kernel_probe = false;
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&eeprom_cap,
+			sizeof(struct cam_eeprom_query_cap_t))) {
+			CAM_ERR(CAM_EEPROM, "Failed Copy to User");
+			/*
+			 * Fix: this path did "return -EFAULT;" followed by
+			 * an unreachable goto, returning to userspace with
+			 * eeprom_mutex still held. Record the error and go
+			 * through the common unlock path instead.
+			 */
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		CAM_DBG(CAM_EEPROM, "eeprom_cap: ID: %d", eeprom_cap.slot_info);
+		break;
+	case CAM_ACQUIRE_DEV:
+		rc = cam_eeprom_get_dev_handle(e_ctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed to acquire dev");
+			goto release_mutex;
+		}
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+		break;
+	case CAM_RELEASE_DEV:
+		if (e_ctrl->cam_eeprom_state != CAM_EEPROM_ACQUIRE) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_EEPROM,
+				"Not in right state to release : %d",
+				e_ctrl->cam_eeprom_state);
+			goto release_mutex;
+		}
+
+		if (e_ctrl->bridge_intf.device_hdl == -1) {
+			CAM_ERR(CAM_EEPROM,
+				"Invalid Handles: link hdl: %d device hdl: %d",
+				e_ctrl->bridge_intf.device_hdl,
+				e_ctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = cam_destroy_device_hdl(e_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_EEPROM,
+				"failed in destroying the device hdl");
+		e_ctrl->bridge_intf.device_hdl = -1;
+		e_ctrl->bridge_intf.link_hdl = -1;
+		e_ctrl->bridge_intf.session_hdl = -1;
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+		break;
+	case CAM_CONFIG_DEV:
+		rc = cam_eeprom_pkt_parse(e_ctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed in eeprom pkt Parsing");
+			goto release_mutex;
+		}
+		break;
+	default:
+		CAM_DBG(CAM_EEPROM, "invalid opcode");
+		break;
+	}
+
+release_mutex:
+	mutex_unlock(&(e_ctrl->eeprom_mutex));
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_core.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_core.h
new file mode 100644
index 000000000000..c7d19e71426e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_core.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_CORE_H_
+#define _CAM_EEPROM_CORE_H_
+
+#include "cam_eeprom_dev.h"
+
+/**
+ * @e_ctrl: EEPROM ctrl structure
+ * @arg:    camera control command argument (struct cam_control *)
+ *
+ * This API handles the camera control commands for the EEPROM device
+ */
+int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg);
+/**
+ * @of_node: device tree node describing the eeprom memory map
+ * @e_ctrl:  EEPROM ctrl structure
+ *
+ * This API parses the memory map described in the device tree node
+ */
+int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
+	struct cam_eeprom_ctrl_t *e_ctrl);
+/**
+ * @e_ctrl: EEPROM ctrl structure
+ *
+ * This API handles the shutdown ioctl/close
+ */
+void cam_eeprom_shutdown(struct cam_eeprom_ctrl_t *e_ctrl);
+
+#endif
+/* _CAM_EEPROM_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
new file mode 100644
index 000000000000..6d8820abb7d7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -0,0 +1,582 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_eeprom_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_eeprom_soc.h"
+#include "cam_eeprom_core.h"
+#include "cam_debug_util.h"
+
+/*
+ * cam_eeprom_subdev_ioctl - v4l2 subdev ioctl entry point.
+ * Only VIDIOC_CAM_CONTROL is handled; everything else returns
+ * -ENOIOCTLCMD so the v4l2 core can report an unsupported ioctl.
+ */
+static long cam_eeprom_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int rc = 0;
+	struct cam_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_eeprom_driver_cmd(e_ctrl, arg);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_eeprom_subdev_close - subdev .close hook; runs the common
+ * shutdown sequence under the eeprom mutex when userspace closes
+ * the device node.
+ */
+static int cam_eeprom_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_eeprom_ctrl_t *e_ctrl =
+		v4l2_get_subdevdata(sd);
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "e_ctrl ptr is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(e_ctrl->eeprom_mutex));
+	cam_eeprom_shutdown(e_ctrl);
+	mutex_unlock(&(e_ctrl->eeprom_mutex));
+
+	return 0;
+}
+
+/**
+ * cam_eeprom_update_i2c_info - Push slave info to the io master
+ * @e_ctrl:   ctrl structure
+ * @i2c_info: slave address and I2C frequency mode to apply
+ *
+ * For CCI masters the 7-bit sid is derived from the 8-bit slave
+ * address (>> 1); for plain I2C masters the client address is set
+ * directly. Other master types (e.g. SPI) are left untouched.
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_i2c_info_t *i2c_info)
+{
+	struct cam_sensor_cci_client *cci_client = NULL;
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+		cci_client = e_ctrl->io_master_info.cci_client;
+		if (!cci_client) {
+			CAM_ERR(CAM_EEPROM, "failed: cci_client %pK",
+				cci_client);
+			return -EINVAL;
+		}
+		cci_client->cci_i2c_master = e_ctrl->cci_i2c_master;
+		cci_client->sid = (i2c_info->slave_addr) >> 1;
+		cci_client->retries = 3;
+		cci_client->id_map = 0;
+		cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+	} else if (e_ctrl->io_master_info.master_type == I2C_MASTER) {
+		e_ctrl->io_master_info.client->addr = i2c_info->slave_addr;
+		CAM_DBG(CAM_EEPROM, "Slave addr: 0x%x", i2c_info->slave_addr);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * cam_eeprom_init_subdev_do_ioctl - 32-bit compat ioctl shim.
+ * Copies the cam_control struct in from the 32-bit caller, forwards
+ * it to the native ioctl handler, and copies the (possibly updated)
+ * struct back out on success.
+ */
+static long cam_eeprom_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_EEPROM,
+			"Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_eeprom_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed in eeprom suddev handling rc %d",
+				rc);
+			return rc;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_EEPROM, "Invalid compat ioctl: %d", cmd);
+		rc = -EINVAL;
+	}
+
+	/* mirror the handler's result back to the 32-bit caller */
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed to copy from user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+#endif
+
+/* internal ops: only .close is needed (shutdown on device release) */
+static const struct v4l2_subdev_internal_ops cam_eeprom_internal_ops = {
+	.close = cam_eeprom_subdev_close,
+};
+
+/* core ops: native ioctl plus the 32-bit compat shim when enabled */
+static struct v4l2_subdev_core_ops cam_eeprom_subdev_core_ops = {
+	.ioctl = cam_eeprom_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_eeprom_init_subdev_do_ioctl,
+#endif
+};
+
+static struct v4l2_subdev_ops cam_eeprom_subdev_ops = {
+	.core = &cam_eeprom_subdev_core_ops,
+};
+
+/**
+ * cam_eeprom_init_subdev - Register the eeprom v4l2 subdevice
+ * @e_ctrl: ctrl structure
+ *
+ * Fills in the v4l2 subdev descriptor (ops, name, flags, entity
+ * function) and registers it with the camera framework.
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_init_subdev(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int rc = 0;
+
+	e_ctrl->v4l2_dev_str.internal_ops = &cam_eeprom_internal_ops;
+	e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+	strlcpy(e_ctrl->device_name, CAM_EEPROM_NAME,
+		sizeof(e_ctrl->device_name));
+	e_ctrl->v4l2_dev_str.name = e_ctrl->device_name;
+	e_ctrl->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	e_ctrl->v4l2_dev_str.ent_function = CAM_EEPROM_DEVICE_TYPE;
+	e_ctrl->v4l2_dev_str.token = e_ctrl;
+
+	rc = cam_register_subdev(&(e_ctrl->v4l2_dev_str));
+	if (rc)
+		/* fix: log under the EEPROM tag (was CAM_SENSOR) */
+		CAM_ERR(CAM_EEPROM, "Fail with cam_register_subdev");
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_i2c_driver_probe - Probe an I2C-connected eeprom
+ * @client: i2c client
+ * @id:     matched device id
+ *
+ * Allocates the ctrl and soc-private structures, parses the device
+ * tree, applies the slave address and registers the v4l2 subdev.
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_i2c_driver_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	struct cam_eeprom_ctrl_t *e_ctrl = NULL;
+	struct cam_eeprom_soc_private *soc_private = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_EEPROM, "i2c_check_functionality failed");
+		/* fix: was falling through with rc == 0 (probe "succeeded") */
+		rc = -ENODEV;
+		goto probe_failure;
+	}
+
+	e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "kzalloc failed");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	soc_private = kzalloc(sizeof(*soc_private), GFP_KERNEL);
+	if (!soc_private) {
+		/* fix: rc was left 0 here, returning success after freeing */
+		rc = -ENOMEM;
+		goto ectrl_free;
+	}
+
+	e_ctrl->soc_info.soc_private = soc_private;
+
+	i2c_set_clientdata(client, e_ctrl);
+
+	mutex_init(&(e_ctrl->eeprom_mutex));
+
+	soc_info = &e_ctrl->soc_info;
+	soc_info->dev = &client->dev;
+	soc_info->dev_name = client->name;
+	e_ctrl->io_master_info.master_type = I2C_MASTER;
+	e_ctrl->io_master_info.client = client;
+	e_ctrl->eeprom_device_type = MSM_CAMERA_I2C_DEVICE;
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->userspace_probe = false;
+
+	rc = cam_eeprom_parse_dt(e_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: soc init rc %d", rc);
+		goto free_soc;
+	}
+
+	rc = cam_eeprom_update_i2c_info(e_ctrl, &soc_private->i2c_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: to update i2c info rc %d", rc);
+		goto free_soc;
+	}
+
+	rc = cam_eeprom_init_subdev(e_ctrl);
+	if (rc)
+		goto free_soc;
+
+	/* DT-provided slave address overrides the default client addr */
+	if (soc_private->i2c_info.slave_addr != 0)
+		e_ctrl->io_master_info.client->addr =
+			soc_private->i2c_info.slave_addr;
+
+	e_ctrl->bridge_intf.device_hdl = -1;
+	e_ctrl->bridge_intf.ops.get_dev_info = NULL;
+	e_ctrl->bridge_intf.ops.link_setup = NULL;
+	e_ctrl->bridge_intf.ops.apply_req = NULL;
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+
+	return rc;
+free_soc:
+	kfree(soc_private);
+ectrl_free:
+	kfree(e_ctrl);
+probe_failure:
+	return rc;
+}
+
+/*
+ * cam_eeprom_i2c_driver_remove - Undo cam_eeprom_i2c_driver_probe:
+ * release clocks, destroy the mutex and free all allocations.
+ */
+static int cam_eeprom_i2c_driver_remove(struct i2c_client *client)
+{
+	int i;
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct cam_eeprom_ctrl_t *e_ctrl;
+	struct cam_eeprom_soc_private *soc_private;
+	struct cam_hw_soc_info *soc_info;
+
+	if (!sd) {
+		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
+		return -EINVAL;
+	}
+
+	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_EEPROM, "soc_info.soc_private is NULL");
+		return -EINVAL;
+	}
+
+	soc_info = &e_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	/*
+	 * NOTE(review): the SPI remove path also frees
+	 * soc_private->power_info.gpio_num_info — confirm it cannot be
+	 * allocated on the I2C path, otherwise it leaks here.
+	 */
+	mutex_destroy(&(e_ctrl->eeprom_mutex));
+	kfree(soc_private);
+	kfree(e_ctrl->io_master_info.cci_client);
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
+	kfree(e_ctrl);
+
+	return 0;
+}
+
+/**
+ * cam_eeprom_spi_setup - Allocate and register an SPI eeprom device
+ * @spi: spi device
+ *
+ * Allocates the ctrl, spi-client and soc-private structures, parses
+ * the device tree and SPI instruction set, and registers the v4l2
+ * subdev.
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_spi_setup(struct spi_device *spi)
+{
+	struct cam_eeprom_ctrl_t *e_ctrl = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_sensor_spi_client *spi_client;
+	struct cam_eeprom_soc_private *eb_info;
+	struct cam_sensor_power_ctrl_t *power_info = NULL;
+	int rc = 0;
+
+	e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+	if (!e_ctrl)
+		return -ENOMEM;
+
+	soc_info = &e_ctrl->soc_info;
+	soc_info->dev = &spi->dev;
+	soc_info->dev_name = spi->modalias;
+
+	e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+	e_ctrl->userspace_probe = false;
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+
+	spi_client = kzalloc(sizeof(*spi_client), GFP_KERNEL);
+	if (!spi_client) {
+		kfree(e_ctrl);
+		return -ENOMEM;
+	}
+
+	eb_info = kzalloc(sizeof(*eb_info), GFP_KERNEL);
+	if (!eb_info) {
+		/* fix: rc was still 0 here, so probe returned success */
+		rc = -ENOMEM;
+		goto spi_free;
+	}
+	e_ctrl->soc_info.soc_private = eb_info;
+
+	e_ctrl->eeprom_device_type = MSM_CAMERA_SPI_DEVICE;
+	e_ctrl->io_master_info.spi_client = spi_client;
+	e_ctrl->io_master_info.master_type = SPI_MASTER;
+	spi_client->spi_master = spi;
+
+	power_info = &eb_info->power_info;
+	power_info->dev = &spi->dev;
+
+	/* set spi instruction info */
+	spi_client->retry_delay = 1;
+	spi_client->retries = 0;
+
+	/* Initialize mutex */
+	mutex_init(&(e_ctrl->eeprom_mutex));
+
+	rc = cam_eeprom_parse_dt(e_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: spi soc init rc %d", rc);
+		goto board_free;
+	}
+
+	rc = cam_eeprom_spi_parse_of(spi_client);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Device tree parsing error");
+		goto board_free;
+	}
+
+	rc = cam_eeprom_init_subdev(e_ctrl);
+	if (rc)
+		goto board_free;
+
+	e_ctrl->bridge_intf.device_hdl = -1;
+	e_ctrl->bridge_intf.ops.get_dev_info = NULL;
+	e_ctrl->bridge_intf.ops.link_setup = NULL;
+	e_ctrl->bridge_intf.ops.apply_req = NULL;
+
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+	return rc;
+
+board_free:
+	kfree(e_ctrl->soc_info.soc_private);
+spi_free:
+	kfree(spi_client);
+	kfree(e_ctrl);
+	return rc;
+}
+
+/*
+ * cam_eeprom_spi_driver_probe - Configure the SPI link (8-bit words,
+ * mode 0) and delegate device creation to cam_eeprom_spi_setup().
+ */
+static int cam_eeprom_spi_driver_probe(struct spi_device *spi)
+{
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_0;
+	spi_setup(spi);
+
+	CAM_DBG(CAM_EEPROM, "irq[%d] cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x]",
+		spi->irq, spi->chip_select, (spi->mode & SPI_CPHA) ? 1 : 0,
+		(spi->mode & SPI_CPOL) ? 1 : 0,
+		(spi->mode & SPI_CS_HIGH) ? 1 : 0);
+	CAM_DBG(CAM_EEPROM, "max_speed[%u]", spi->max_speed_hz);
+
+	return cam_eeprom_spi_setup(spi);
+}
+
+/*
+ * cam_eeprom_spi_driver_remove - Undo cam_eeprom_spi_setup: release
+ * clocks, free the spi client, soc private data (including the gpio
+ * table) and the ctrl structure.
+ */
+static int cam_eeprom_spi_driver_remove(struct spi_device *sdev)
+{
+	int i;
+	struct v4l2_subdev *sd = spi_get_drvdata(sdev);
+	struct cam_eeprom_ctrl_t *e_ctrl;
+	struct cam_eeprom_soc_private *soc_private;
+	struct cam_hw_soc_info *soc_info;
+
+	if (!sd) {
+		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
+		return -EINVAL;
+	}
+
+	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	soc_info = &e_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	kfree(e_ctrl->io_master_info.spi_client);
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	if (soc_private) {
+		kfree(soc_private->power_info.gpio_num_info);
+		kfree(soc_private);
+	}
+	mutex_destroy(&(e_ctrl->eeprom_mutex));
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
+	kfree(e_ctrl);
+
+	return 0;
+}
+
+/**
+ * cam_eeprom_platform_driver_probe - Probe a CCI-connected eeprom
+ * @pdev: platform device
+ *
+ * Allocates the ctrl structure plus a CCI client, parses the device
+ * tree, registers the v4l2 subdev and leaves the device in the
+ * CAM_EEPROM_INIT state.
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_platform_driver_probe(
+	struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	struct cam_eeprom_ctrl_t *e_ctrl = NULL;
+	struct cam_eeprom_soc_private *soc_private = NULL;
+
+	e_ctrl = kzalloc(sizeof(struct cam_eeprom_ctrl_t), GFP_KERNEL);
+	if (!e_ctrl)
+		return -ENOMEM;
+
+	e_ctrl->soc_info.pdev = pdev;
+	e_ctrl->soc_info.dev = &pdev->dev;
+	e_ctrl->soc_info.dev_name = pdev->name;
+	e_ctrl->eeprom_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->userspace_probe = false;
+
+	/* platform devices always talk to the eeprom through CCI */
+	e_ctrl->io_master_info.master_type = CCI_MASTER;
+	e_ctrl->io_master_info.cci_client = kzalloc(
+		sizeof(struct cam_sensor_cci_client), GFP_KERNEL);
+	if (!e_ctrl->io_master_info.cci_client) {
+		rc = -ENOMEM;
+		goto free_e_ctrl;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_eeprom_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto free_cci_client;
+	}
+	e_ctrl->soc_info.soc_private = soc_private;
+	soc_private->power_info.dev = &pdev->dev;
+
+	/* Initialize mutex */
+	mutex_init(&(e_ctrl->eeprom_mutex));
+	rc = cam_eeprom_parse_dt(e_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: soc init rc %d", rc);
+		goto free_soc;
+	}
+	rc = cam_eeprom_update_i2c_info(e_ctrl, &soc_private->i2c_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: to update i2c info rc %d", rc);
+		goto free_soc;
+	}
+
+	rc = cam_eeprom_init_subdev(e_ctrl);
+	if (rc)
+		goto free_soc;
+
+	e_ctrl->bridge_intf.device_hdl = -1;
+	e_ctrl->bridge_intf.ops.get_dev_info = NULL;
+	e_ctrl->bridge_intf.ops.link_setup = NULL;
+	e_ctrl->bridge_intf.ops.apply_req = NULL;
+
+	platform_set_drvdata(pdev, e_ctrl);
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+
+	return rc;
+free_soc:
+	kfree(soc_private);
+free_cci_client:
+	kfree(e_ctrl->io_master_info.cci_client);
+free_e_ctrl:
+	kfree(e_ctrl);
+	return rc;
+}
+
+/*
+ * cam_eeprom_platform_driver_remove - Undo the platform probe:
+ * release clocks, destroy the mutex and free all allocations.
+ */
+static int cam_eeprom_platform_driver_remove(struct platform_device *pdev)
+{
+	int i;
+	struct cam_eeprom_ctrl_t *e_ctrl;
+	struct cam_hw_soc_info *soc_info;
+
+	e_ctrl = platform_get_drvdata(pdev);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	soc_info = &e_ctrl->soc_info;
+
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	/*
+	 * NOTE(review): unlike the SPI remove path, power_info's
+	 * gpio_num_info is not freed here — confirm it is never
+	 * allocated for platform devices, otherwise it leaks.
+	 */
+	mutex_destroy(&(e_ctrl->eeprom_mutex));
+	kfree(soc_info->soc_private);
+	kfree(e_ctrl->io_master_info.cci_client);
+	platform_set_drvdata(pdev, NULL);
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
+	kfree(e_ctrl);
+	return 0;
+}
+
+/* device-tree match table shared by the platform and SPI drivers */
+static const struct of_device_id cam_eeprom_dt_match[] = {
+	{ .compatible = "qcom,eeprom" },
+	{ }
+};
+
+
+MODULE_DEVICE_TABLE(of, cam_eeprom_dt_match);
+
+static struct platform_driver cam_eeprom_platform_driver = {
+	.driver = {
+		.name = "qcom,eeprom",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_eeprom_dt_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = cam_eeprom_platform_driver_probe,
+	.remove = cam_eeprom_platform_driver_remove,
+};
+
+static const struct i2c_device_id cam_eeprom_i2c_id[] = {
+	{ "msm_eeprom", (kernel_ulong_t)NULL},
+	{ }
+};
+
+static struct i2c_driver cam_eeprom_i2c_driver = {
+	.id_table = cam_eeprom_i2c_id,
+	.probe = cam_eeprom_i2c_driver_probe,
+	.remove = cam_eeprom_i2c_driver_remove,
+	.driver = {
+		.name = "msm_eeprom",
+	},
+};
+
+static struct spi_driver cam_eeprom_spi_driver = {
+	.driver = {
+		.name = "qcom_eeprom",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_eeprom_dt_match,
+	},
+	.probe = cam_eeprom_spi_driver_probe,
+	.remove = cam_eeprom_spi_driver_remove,
+};
+/**
+ * cam_eeprom_driver_init - Register platform, SPI and I2C drivers
+ *
+ * Registers all three bus variants of the eeprom driver. On failure
+ * the drivers registered so far are unregistered again, so a partial
+ * module init does not leave stale registrations behind (the
+ * original code returned without unwinding).
+ *
+ * Returns success or failure
+ */
+static int __init cam_eeprom_driver_init(void)
+{
+	int rc = 0;
+
+	rc = platform_driver_register(&cam_eeprom_platform_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "platform_driver_register failed rc = %d",
+			rc);
+		return rc;
+	}
+
+	rc = spi_register_driver(&cam_eeprom_spi_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "spi_register_driver failed rc = %d", rc);
+		goto unregister_platform;
+	}
+
+	rc = i2c_add_driver(&cam_eeprom_i2c_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "i2c_add_driver failed rc = %d", rc);
+		goto unregister_spi;
+	}
+
+	return rc;
+
+unregister_spi:
+	spi_unregister_driver(&cam_eeprom_spi_driver);
+unregister_platform:
+	platform_driver_unregister(&cam_eeprom_platform_driver);
+	return rc;
+}
+
+/*
+ * cam_eeprom_driver_exit - Unregister all three bus drivers.
+ * NOTE(review): not in reverse registration order (platform is
+ * unregistered first although it was registered first) — confirm
+ * this is intentional; reverse order is the usual convention.
+ */
+static void __exit cam_eeprom_driver_exit(void)
+{
+	platform_driver_unregister(&cam_eeprom_platform_driver);
+	spi_unregister_driver(&cam_eeprom_spi_driver);
+	i2c_del_driver(&cam_eeprom_i2c_driver);
+}
+
+module_init(cam_eeprom_driver_init);
+module_exit(cam_eeprom_driver_exit);
+MODULE_DESCRIPTION("CAM EEPROM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
new file mode 100644
index 000000000000..ff9b244a3391
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
@@ -0,0 +1,190 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_DEV_H_
+#define _CAM_EEPROM_DEV_H_
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cam_sensor.h>
+#include <cam_sensor_i2c.h>
+#include <cam_sensor_spi.h>
+#include <cam_sensor_io.h>
+#include <cam_cci_dev.h>
+#include <cam_req_mgr_util.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_mem_mgr.h>
+#include <cam_subdev.h>
+#include "cam_soc_util.h"
+
+/* Declare a statically-initialized, file-scope mutex */
+#define DEFINE_MSM_MUTEX(mutexname) \
+	static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+/* Buffer size used when building DT property-name strings (e.g. "page%d") */
+#define PROPERTY_MAXSIZE 32
+
+#define MSM_EEPROM_MEMORY_MAP_MAX_SIZE 80
+#define MSM_EEPROM_MAX_MEM_MAP_CNT 8
+/* Number of u32 elements in each page/pageen/poll/mem DT array entry */
+#define MSM_EEPROM_MEM_MAP_PROPERTIES_CNT 8
+
+/* Device life-cycle: INIT -> ACQUIRE -> CONFIG */
+enum cam_eeprom_state {
+	CAM_EEPROM_INIT,
+	CAM_EEPROM_ACQUIRE,
+	CAM_EEPROM_CONFIG,
+};
+
+/**
+ * struct cam_eeprom_map_t - one operation entry of an eeprom memory map
+ * @valid_size : size in bytes covered by this entry (mem entries are
+ *               summed into cam_eeprom_memory_block_t.num_data)
+ * @addr       : Address
+ * @addr_type  : Address type
+ * @data       : data
+ * @data_type  : Data type
+ * @delay      : Delay
+ *
+ */
+struct cam_eeprom_map_t {
+	uint32_t valid_size;
+	uint32_t addr;
+	uint32_t addr_type;
+	uint32_t data;
+	uint32_t data_type;
+	uint32_t delay;
+};
+
+/**
+ * struct cam_eeprom_memory_map_t - eeprom memory map types
+ * @page   : page memory
+ * @pageen : pageen (page enable) memory
+ * @poll   : poll memory
+ * @mem    : mem (data region to read)
+ * @saddr  : slave addr for this block
+ *
+ */
+struct cam_eeprom_memory_map_t {
+	struct cam_eeprom_map_t page;
+	struct cam_eeprom_map_t pageen;
+	struct cam_eeprom_map_t poll;
+	struct cam_eeprom_map_t mem;
+	uint32_t saddr;
+};
+
+/**
+ * struct cam_eeprom_memory_block_t - eeprom mem block info
+ * @map      : eeprom memory map (array of @num_map entries)
+ * @num_map  : number of map blocks
+ * @mapdata  : buffer holding the data read out via the map
+ * @num_data : total size in bytes of @mapdata
+ *
+ */
+struct cam_eeprom_memory_block_t {
+	struct cam_eeprom_memory_map_t *map;
+	uint32_t num_map;
+	uint8_t *mapdata;
+	uint32_t num_data;
+};
+
+/**
+ * struct cam_eeprom_cmm_t - camera multimodule (CMM) data descriptor
+ * @cmm_support     : cmm support flag
+ * @cmm_compression : cmm compression flag
+ * @cmm_offset      : cmm data start offset
+ * @cmm_size        : cmm data size
+ *
+ */
+struct cam_eeprom_cmm_t {
+	uint32_t cmm_support;
+	uint32_t cmm_compression;
+	uint32_t cmm_offset;
+	uint32_t cmm_size;
+};
+
+/**
+ * struct cam_eeprom_i2c_info_t - I2C info
+ * @slave_addr    : slave address
+ * @i2c_freq_mode : i2c frequency mode (validated against I2C_MAX_MODES)
+ *
+ */
+struct cam_eeprom_i2c_info_t {
+	uint16_t slave_addr;
+	uint8_t i2c_freq_mode;
+};
+
+/**
+ * struct cam_eeprom_soc_private - eeprom soc private data structure
+ * @eeprom_name : eeprom name (from the "eeprom-name" DT property)
+ * @i2c_info    : i2c info structure
+ * @power_info  : eeprom power info
+ * @cmm_data    : cmm data
+ *
+ */
+struct cam_eeprom_soc_private {
+	const char *eeprom_name;
+	struct cam_eeprom_i2c_info_t i2c_info;
+	struct cam_sensor_power_ctrl_t power_info;
+	struct cam_eeprom_cmm_t cmm_data;
+};
+
+/**
+ * struct cam_eeprom_intf_params - bridge interface params
+ * @device_hdl  : Device Handle
+ * @session_hdl : Session Handle
+ * @link_hdl    : Link Handle
+ * @ops         : KMD operations
+ * @crm_cb      : Callback API pointers
+ */
+struct cam_eeprom_intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_eeprom_ctrl_t - EEPROM device control structure
+ * @pdev               : platform device
+ * @spi                : spi device
+ * @eeprom_mutex       : eeprom mutex
+ * @soc_info           : eeprom soc related info
+ * @io_master_info     : Information about the communication master
+ * @gpio_num_info      : gpio info
+ * @cci_i2c_master     : CCI I2C master index
+ * @cci_num            : CCI device/hardware instance number
+ * @v4l2_dev_str       : V4L2 device structure
+ * @bridge_intf        : bridge interface params
+ * @eeprom_device_type : transport type (platform/i2c/spi)
+ * @cam_eeprom_state   : eeprom_device_state
+ * @userspace_probe    : flag indicates userspace or kernel probe
+ * @cal_data           : Calibration data
+ * @device_name        : Device name
+ *
+ */
+struct cam_eeprom_ctrl_t {
+	struct platform_device *pdev;
+	struct spi_device *spi;
+	struct mutex eeprom_mutex;
+	struct cam_hw_soc_info soc_info;
+	struct camera_io_master io_master_info;
+	struct msm_camera_gpio_num_info *gpio_num_info;
+	enum cci_i2c_master_t cci_i2c_master;
+	enum cci_device_num cci_num;
+	struct cam_subdev v4l2_dev_str;
+	struct cam_eeprom_intf_params bridge_intf;
+	enum msm_camera_device_type_t eeprom_device_type;
+	enum cam_eeprom_state cam_eeprom_state;
+	bool userspace_probe;
+	struct cam_eeprom_memory_block_t cal_data;
+	char device_name[20];
+};
+
+/* Update the I2C slave address/frequency of the control structure */
+int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_i2c_info_t *i2c_info);
+
+#endif /*_CAM_EEPROM_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
new file mode 100644
index 000000000000..bd087b7ec51f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
@@ -0,0 +1,385 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+
+#include "cam_eeprom_soc.h"
+#include "cam_debug_util.h"
+
+/*
+ * Copy one 5-element "spiop-*" DT array into the named entry of the SPI
+ * command table. Function-like macro: @out is evaluated five times, so
+ * only pass a plain array, never an expression with side effects.
+ */
+#define cam_eeprom_spi_parse_cmd(spi_dev, name, out) \
+	{ \
+		spi_dev->cmd_tbl.name.opcode = out[0]; \
+		spi_dev->cmd_tbl.name.addr_len = out[1]; \
+		spi_dev->cmd_tbl.name.dummy_len = out[2]; \
+		spi_dev->cmd_tbl.name.delay_intv = out[3]; \
+		spi_dev->cmd_tbl.name.delay_count = out[4]; \
+	}
+
+/**
+ * cam_eeprom_spi_parse_of - populate the SPI command table from DT
+ * @spi_dev: SPI client whose cmd_tbl and eeprom-id fields are filled
+ *
+ * Reads the mandatory 5-element "spiop-*" arrays (opcode, addr_len,
+ * dummy_len, delay_intv, delay_count) and the 2-element "eeprom-id"
+ * (manufacturer id, device id) from the SPI master's DT node.
+ *
+ * Return: 0 on success, or the of_property_read_* error code of the
+ * first missing/malformed property. (The original mixed hard-coded
+ * -EFAULT with rc; the real read error is now propagated consistently.)
+ */
+int cam_eeprom_spi_parse_of(struct cam_sensor_spi_client *spi_dev)
+{
+	int rc;
+	uint32_t tmp[5];
+	struct device_node *of_node = spi_dev->spi_master->dev.of_node;
+
+	rc = of_property_read_u32_array(of_node, "spiop-read", tmp, 5);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get read data");
+		return rc;
+	}
+	cam_eeprom_spi_parse_cmd(spi_dev, read, tmp);
+
+	rc = of_property_read_u32_array(of_node, "spiop-readseq", tmp, 5);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get readseq data");
+		return rc;
+	}
+	cam_eeprom_spi_parse_cmd(spi_dev, read_seq, tmp);
+
+	rc = of_property_read_u32_array(of_node, "spiop-queryid", tmp, 5);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get queryid data");
+		return rc;
+	}
+	cam_eeprom_spi_parse_cmd(spi_dev, query_id, tmp);
+
+	rc = of_property_read_u32_array(of_node, "spiop-pprog", tmp, 5);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get page program data");
+		return rc;
+	}
+	cam_eeprom_spi_parse_cmd(spi_dev, page_program, tmp);
+
+	rc = of_property_read_u32_array(of_node, "spiop-wenable", tmp, 5);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get write enable data");
+		return rc;
+	}
+	cam_eeprom_spi_parse_cmd(spi_dev, write_enable, tmp);
+
+	rc = of_property_read_u32_array(of_node, "spiop-readst", tmp, 5);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get readst data");
+		return rc;
+	}
+	cam_eeprom_spi_parse_cmd(spi_dev, read_status, tmp);
+
+	rc = of_property_read_u32_array(of_node, "spiop-erase", tmp, 5);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get erase data");
+		return rc;
+	}
+	cam_eeprom_spi_parse_cmd(spi_dev, erase, tmp);
+
+	rc = of_property_read_u32_array(of_node, "eeprom-id", tmp, 2);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get eeprom id");
+		return rc;
+	}
+
+	spi_dev->mfr_id0 = tmp[0];
+	spi_dev->device_id0 = tmp[1];
+
+	return 0;
+}
+
+/*
+ * cam_eeprom_parse_memory_map() - parse memory map in device node
+ * @of: device node
+ * @data: memory block for output
+ *
+ * This functions parses @of to fill @data. It allocates map itself, parses
+ * the @of node, calculate total data length, and allocates required buffer.
+ * It only fills the map, but does not perform actual reading.
+ */
+int cam_eeprom_parse_dt_memory_map(struct device_node *node,
+	struct cam_eeprom_memory_block_t *data)
+{
+	int i, rc = 0;
+	char property[PROPERTY_MAXSIZE];
+	uint32_t count = MSM_EEPROM_MEM_MAP_PROPERTIES_CNT;
+	struct cam_eeprom_memory_map_t *map;
+
+	snprintf(property, PROPERTY_MAXSIZE, "num-blocks");
+	rc = of_property_read_u32(node, property, &data->num_map);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed: num-blocks not available rc %d",
+			rc);
+		return rc;
+	}
+
+	/*
+	 * NOTE(review): num_map comes straight from DT and is not bounded
+	 * (e.g. against MSM_EEPROM_MAX_MEM_MAP_CNT) before the allocation
+	 * below — confirm whether a sanity limit is wanted here.
+	 */
+	map = vzalloc((sizeof(*map) * data->num_map));
+	if (!map) {
+		rc = -ENOMEM;
+		return rc;
+	}
+	data->map = map;
+
+	/* page, poll and mem are mandatory per block; pageen/saddr optional */
+	for (i = 0; i < data->num_map; i++) {
+		snprintf(property, PROPERTY_MAXSIZE, "page%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].page, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: page not available rc %d",
+				rc);
+			goto ERROR;
+		}
+
+		snprintf(property, PROPERTY_MAXSIZE, "pageen%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].pageen, count);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "pageen not needed");
+
+		snprintf(property, PROPERTY_MAXSIZE, "saddr%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].saddr, 1);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "saddr not needed - block %d", i);
+
+		snprintf(property, PROPERTY_MAXSIZE, "poll%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].poll, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: poll not available rc %d",
+				rc);
+			goto ERROR;
+		}
+
+		snprintf(property, PROPERTY_MAXSIZE, "mem%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].mem, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: mem not available rc %d",
+				rc);
+			goto ERROR;
+		}
+		/* total readout size is the sum of all mem block sizes */
+		data->num_data += map[i].mem.valid_size;
+	}
+
+	data->mapdata = vzalloc(data->num_data);
+	if (!data->mapdata) {
+		rc = -ENOMEM;
+		goto ERROR;
+	}
+	return rc;
+
+ERROR:
+	/* on failure, release the map and leave *data fully zeroed */
+	vfree(data->map);
+	memset(data, 0, sizeof(*data));
+	return rc;
+}
+
+/**
+ * cam_eeprom_get_dt_data - parse power settings and the GPIO table
+ * @e_ctrl: ctrl structure
+ *
+ * Power settings are read from DT only for kernel probe; the GPIO table
+ * is optional, but if a gpio_data block exists its common table must be
+ * non-empty. (The two GPIO failure paths previously logged the identical
+ * "No GPIO found" message despite having different outcomes.)
+ *
+ * Return: 0 on success or when no GPIO block exists, negative otherwise.
+ */
+static int cam_eeprom_get_dt_data(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info = &e_ctrl->soc_info;
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+	struct device_node *of_node = NULL;
+
+	of_node = soc_info->dev->of_node;
+
+	if (e_ctrl->userspace_probe == false) {
+		rc = cam_get_dt_power_setting_data(of_node,
+			soc_info, power_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed in getting power settings");
+			return rc;
+		}
+	}
+
+	/* a device without any GPIO block is valid */
+	if (!soc_info->gpio_data) {
+		CAM_INFO(CAM_EEPROM, "No GPIO found");
+		return 0;
+	}
+
+	/* but a present-yet-empty common table is a DT error */
+	if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+		CAM_INFO(CAM_EEPROM, "Empty GPIO common table");
+		return -EINVAL;
+	}
+
+	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+		&power_info->gpio_num_info);
+	if ((rc < 0) || (!power_info->gpio_num_info)) {
+		CAM_ERR(CAM_EEPROM, "No/Error EEPROM GPIOs");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_cmm_dts - read the camera multimodule (CMM) DT properties
+ * @eb_info: eeprom private data structure
+ * @of_node: eeprom device node
+ *
+ * Fills eb_info->cmm_data from the "cmm-data-*" properties. Offset and
+ * size are optional; absence is only reported at debug level.
+ *
+ * Return: always 0 (missing CMM support is not an error).
+ */
+static int cam_eeprom_cmm_dts(struct cam_eeprom_soc_private *eb_info,
+	struct device_node *of_node)
+{
+	int rc;
+	struct cam_eeprom_cmm_t *cmm = &eb_info->cmm_data;
+
+	cmm->cmm_support = of_property_read_bool(of_node, "cmm-data-support");
+	if (!cmm->cmm_support) {
+		CAM_DBG(CAM_EEPROM, "No cmm support");
+		return 0;
+	}
+
+	cmm->cmm_compression =
+		of_property_read_bool(of_node, "cmm-data-compressed");
+
+	rc = of_property_read_u32(of_node, "cmm-data-offset",
+		&cmm->cmm_offset);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "No MM offset data rc %d", rc);
+
+	rc = of_property_read_u32(of_node, "cmm-data-size", &cmm->cmm_size);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "No MM size data rc %d", rc);
+
+	CAM_DBG(CAM_EEPROM, "cmm_compr %d, cmm_offset %d, cmm_size %d",
+		cmm->cmm_compression, cmm->cmm_offset, cmm->cmm_size);
+	return 0;
+}
+
+/**
+ * cam_eeprom_parse_dt - parse the eeprom DT node at probe time
+ * @e_ctrl: ctrl structure
+ *
+ * This function is called from cam_eeprom_platform/i2c/spi_driver_probe;
+ * it parses the eeprom dt node and decides for userspace or kernel probe.
+ *
+ * Fixes over the original: devm_clk_get() returns an ERR_PTR on failure,
+ * never NULL, so the clock loop now uses IS_ERR_OR_NULL() instead of a
+ * plain NULL test (which could never fire); the cci-device debug log
+ * used the CAM_ACTUATOR tag by copy-paste and now uses CAM_EEPROM.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int cam_eeprom_parse_dt(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int i, rc = 0;
+	struct cam_hw_soc_info *soc_info = &e_ctrl->soc_info;
+	struct device_node *of_node = NULL;
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	uint32_t temp;
+
+	if (!soc_info->dev) {
+		CAM_ERR(CAM_EEPROM, "Dev is NULL");
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "Failed to read DT properties rc : %d", rc);
+		return rc;
+	}
+
+	of_node = soc_info->dev->of_node;
+
+	/* absence of "eeprom-name" selects the userspace probe path */
+	rc = of_property_read_string(of_node, "eeprom-name",
+		&soc_private->eeprom_name);
+	if (rc < 0) {
+		CAM_DBG(CAM_EEPROM, "kernel probe is not enabled");
+		e_ctrl->userspace_probe = true;
+	}
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+		rc = of_property_read_u32(of_node, "cci-master",
+			&e_ctrl->cci_i2c_master);
+		if (rc < 0 || (e_ctrl->cci_i2c_master >= MASTER_MAX)) {
+			CAM_DBG(CAM_EEPROM, "failed rc %d", rc);
+			rc = -EFAULT;
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node, "cci-device",
+			&e_ctrl->cci_num);
+		CAM_DBG(CAM_EEPROM, "cci-device %d, rc %d",
+			e_ctrl->cci_num, rc);
+		if (rc < 0) {
+			/* Set default master 0 */
+			e_ctrl->cci_num = CCI_DEVICE_0;
+			rc = 0;
+		}
+		e_ctrl->io_master_info.cci_client->cci_device = e_ctrl->cci_num;
+	}
+
+	if (e_ctrl->io_master_info.master_type == SPI_MASTER) {
+		rc = cam_eeprom_cmm_dts(soc_private, soc_info->dev->of_node);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "MM data not available rc %d", rc);
+	}
+
+	rc = cam_eeprom_get_dt_data(e_ctrl);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "failed: eeprom get dt data rc %d", rc);
+
+	/* slave address / frequency only apply to non-SPI kernel probe */
+	if ((e_ctrl->userspace_probe == false) &&
+		(e_ctrl->io_master_info.master_type != SPI_MASTER)) {
+		rc = of_property_read_u32(of_node, "slave-addr", &temp);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "failed: no slave-addr rc %d", rc);
+
+		soc_private->i2c_info.slave_addr = temp;
+
+		rc = of_property_read_u32(of_node, "i2c-freq-mode", &temp);
+		soc_private->i2c_info.i2c_freq_mode = temp;
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM,
+				"i2c-freq-mode read fail %d", rc);
+			soc_private->i2c_info.i2c_freq_mode = 0;
+		}
+		if (soc_private->i2c_info.i2c_freq_mode >= I2C_MAX_MODES) {
+			CAM_ERR(CAM_EEPROM, "invalid i2c_freq_mode = %d",
+				soc_private->i2c_info.i2c_freq_mode);
+			soc_private->i2c_info.i2c_freq_mode = 0;
+		}
+		CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X",
+			soc_private->i2c_info.slave_addr);
+	}
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = devm_clk_get(soc_info->dev,
+			soc_info->clk_name[i]);
+		if (IS_ERR_OR_NULL(soc_info->clk[i])) {
+			CAM_ERR(CAM_EEPROM, "get failed for %s",
+				soc_info->clk_name[i]);
+			rc = -ENOENT;
+			return rc;
+		}
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
new file mode 100644
index 000000000000..0a7f8e179635
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_SOC_H_
+#define _CAM_EEPROM_SOC_H_
+
+#include "cam_eeprom_dev.h"
+
+/* Fill the SPI command table and eeprom-id from the client's DT node */
+int cam_eeprom_spi_parse_of(struct cam_sensor_spi_client *client);
+
+/* Allocate and fill the eeprom memory map from the given DT node */
+int cam_eeprom_parse_dt_memory_map(struct device_node *of,
+	struct cam_eeprom_memory_block_t *data);
+
+/* Parse the eeprom DT node at probe time (kernel vs userspace probe) */
+int cam_eeprom_parse_dt(struct cam_eeprom_ctrl_t *e_ctrl);
+#endif/* _CAM_EEPROM_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/Makefile
new file mode 100644
index 000000000000..79ad59f35f8f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/Makefile
@@ -0,0 +1,11 @@
+# Build the Spectra camera flash module (device, core and SoC parsing)
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash_dev.o cam_flash_core.o cam_flash_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_core.c
new file mode 100644
index 000000000000..5c9df8ac7abf
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -0,0 +1,1587 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_flash_core.h"
+#include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
+
+/**
+ * cam_flash_prepare - enable or disable the PMIC flash regulator
+ * @flash_ctrl       : flash control structure
+ * @regulator_enable : true to enable, false to disable
+ *
+ * Tracks the regulator status in is_regulator_enabled so qpnp prepare is
+ * issued only on an actual state transition; asking for the state it is
+ * already in falls through to the error branch below.
+ */
+static int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
+	bool regulator_enable)
+{
+	int rc = 0;
+
+	if (!(flash_ctrl->switch_trigger)) {
+		CAM_ERR(CAM_FLASH, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (regulator_enable &&
+		(flash_ctrl->is_regulator_enabled == false)) {
+		rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+			ENABLE_REGULATOR, NULL);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "regulator enable failed rc = %d",
+				rc);
+			return rc;
+		}
+		flash_ctrl->is_regulator_enabled = true;
+	} else if ((!regulator_enable) &&
+		(flash_ctrl->is_regulator_enabled == true)) {
+		rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+			DISABLE_REGULATOR, NULL);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "regulator disable failed rc = %d",
+				rc);
+			return rc;
+		}
+		flash_ctrl->is_regulator_enabled = false;
+	} else {
+		/* requested state equals tracked state: rejected as error */
+		CAM_ERR(CAM_FLASH, "Wrong Flash State : %d",
+			flash_ctrl->flash_state);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_flash_pmic_flush_nrt - invalidate pending non-realtime settings
+ * @fctrl: flash control structure
+ *
+ * INIT_INFO only invalidates the stored init setting; WIDGET, RER and
+ * INIT_FIRE additionally reset all counters, delays and LED currents of
+ * the non-realtime info block. Other command types are left untouched.
+ */
+static int cam_flash_pmic_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+	int i;
+	struct cam_flash_frame_setting *nrt;
+
+	if (!fctrl)
+		return -EINVAL;
+
+	nrt = &fctrl->nrt_info;
+
+	switch (nrt->cmn_attr.cmd_type) {
+	case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO:
+		fctrl->flash_init_setting.cmn_attr.is_settings_valid = false;
+		break;
+	case CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET:
+	case CAMERA_SENSOR_FLASH_CMD_TYPE_RER:
+	case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE:
+		nrt->cmn_attr.is_settings_valid = false;
+		nrt->cmn_attr.count = 0;
+		nrt->num_iterations = 0;
+		nrt->led_on_delay_ms = 0;
+		nrt->led_off_delay_ms = 0;
+		for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
+			nrt->led_current_ma[i] = 0;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * cam_flash_i2c_flush_nrt - delete pending non-realtime I2C settings
+ * @fctrl: flash control structure
+ *
+ * Deletes the init and config i2c setting lists if they are still
+ * marked valid; bails out on the first deletion failure.
+ */
+static int cam_flash_i2c_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+	int rc;
+
+	if (fctrl->i2c_data.init_settings.is_settings_valid) {
+		rc = delete_request(&fctrl->i2c_data.init_settings);
+		if (rc) {
+			CAM_WARN(CAM_FLASH,
+				"Failed to delete Init i2c_setting: %d",
+				rc);
+			return rc;
+		}
+	}
+
+	if (fctrl->i2c_data.config_settings.is_settings_valid) {
+		rc = delete_request(&fctrl->i2c_data.config_settings);
+		if (rc) {
+			CAM_WARN(CAM_FLASH,
+				"Failed to delete NRT i2c_setting: %d",
+				rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cam_flash_construct_default_power_setting - build fallback power tables
+ * @power_info: power control structure to populate
+ *
+ * Used when DT provides no power settings: allocates a single power-up
+ * entry (CUSTOM_REG1 rail, 2 ms settle delay) and a matching single
+ * power-down entry. Idiom cleanup: drop the redundant casts of kzalloc
+ * and use sizeof *ptr instead of sizeof(type).
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure (partial
+ * allocations are released and the size reset).
+ */
+static int cam_flash_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0;
+
+	power_info->power_setting_size = 1;
+	power_info->power_setting =
+		kzalloc(sizeof(*power_info->power_setting), GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_setting[0].seq_type = SENSOR_CUSTOM_REG1;
+	power_info->power_setting[0].seq_val = CAM_V_CUSTOM1;
+	power_info->power_setting[0].config_val = 0;
+	power_info->power_setting[0].delay = 2;
+
+	power_info->power_down_setting_size = 1;
+	power_info->power_down_setting =
+		kzalloc(sizeof(*power_info->power_down_setting), GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	power_info->power_down_setting[0].seq_type = SENSOR_CUSTOM_REG1;
+	power_info->power_down_setting[0].seq_val = CAM_V_CUSTOM1;
+	power_info->power_down_setting[0].config_val = 0;
+
+	return rc;
+
+free_power_settings:
+	kfree(power_info->power_setting);
+	power_info->power_setting = NULL;
+	power_info->power_setting_size = 0;
+	return rc;
+}
+
+/**
+ * cam_flash_pmic_power_ops - toggle the PMIC flash regulator
+ * @fctrl            : flash control structure
+ * @regulator_enable : true to enable, false to disable
+ *
+ * Disable is only performed when the device is in the START state and
+ * the regulator is currently tracked as enabled.
+ */
+int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl,
+	bool regulator_enable)
+{
+	int rc = 0;
+
+	if (!(fctrl->switch_trigger)) {
+		CAM_ERR(CAM_FLASH, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (regulator_enable) {
+		rc = cam_flash_prepare(fctrl, true);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"Enable Regulator Failed rc = %d", rc);
+			return rc;
+		}
+	}
+
+	if (!regulator_enable) {
+		if ((fctrl->flash_state == CAM_FLASH_STATE_START) &&
+			(fctrl->is_regulator_enabled == true)) {
+			rc = cam_flash_prepare(fctrl, false);
+			if (rc)
+				CAM_ERR(CAM_FLASH,
+					"Disable Regulator Failed rc: %d", rc);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * cam_flash_i2c_power_ops - power the I2C flash device up or down
+ * @fctrl            : flash control structure
+ * @regulator_enable : true to power up, false to power down
+ *
+ * On power-up, falls back to default power settings when DT provided
+ * none, then powers the core and initializes the I2C/CCI master. On
+ * power-down, the power settings are freed (via free_pwr_settings) so
+ * the next power-up re-resolves them.
+ */
+int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl,
+	bool regulator_enable)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info = &fctrl->soc_info;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&fctrl->power_info;
+
+	/*
+	 * NOTE(review): power_info/soc_info are addresses of embedded
+	 * members, so this check can never fail for a non-NULL fctrl.
+	 */
+	if (!power_info || !soc_info) {
+		CAM_ERR(CAM_FLASH, "Power Info is NULL");
+		return -EINVAL;
+	}
+	power_info->dev = soc_info->dev;
+
+	if (regulator_enable && (fctrl->is_regulator_enabled == false)) {
+		if ((power_info->power_setting == NULL) &&
+			(power_info->power_down_setting == NULL)) {
+			CAM_INFO(CAM_FLASH,
+				"Using default power settings");
+			rc = cam_flash_construct_default_power_setting(
+				power_info);
+			if (rc < 0) {
+				CAM_ERR(CAM_FLASH,
+					"Construct default pwr setting failed rc: %d",
+					rc);
+				return rc;
+			}
+		}
+
+		rc = cam_sensor_core_power_up(power_info, soc_info);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "power up the core is failed:%d",
+				rc);
+			goto free_pwr_settings;
+		}
+
+		rc = camera_io_init(&(fctrl->io_master_info));
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "cci_init failed: rc: %d", rc);
+			cam_sensor_util_power_down(power_info, soc_info);
+			goto free_pwr_settings;
+		}
+		fctrl->is_regulator_enabled = true;
+	} else if ((!regulator_enable) &&
+		(fctrl->is_regulator_enabled == true)) {
+		rc = cam_sensor_util_power_down(power_info, soc_info);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "power down the core is failed:%d",
+				rc);
+			return rc;
+		}
+		camera_io_release(&(fctrl->io_master_info));
+		fctrl->is_regulator_enabled = false;
+		/* intentional: settings are also freed on clean power-down */
+		goto free_pwr_settings;
+	}
+	return rc;
+
+free_pwr_settings:
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	power_info->power_setting_size = 0;
+	power_info->power_down_setting_size = 0;
+
+	return rc;
+}
+
+/**
+ * cam_flash_pmic_flush_request - flush queued PMIC flash settings
+ * @fctrl  : flash control structure
+ * @type   : FLUSH_ALL or FLUSH_REQ
+ * @req_id : request to flush (0 means flush non-realtime settings)
+ */
+int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl,
+	enum cam_flash_flush_type type, uint64_t req_id)
+{
+	int rc = 0;
+	int i = 0, j = 0;
+	int frame_offset = 0;
+
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (type == FLUSH_ALL) {
+		/* turn LEDs off before discarding every per-frame slot */
+		cam_flash_off(fctrl);
+		/* flush all requests*/
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			fctrl->per_frame[i].cmn_attr.request_id = 0;
+			fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
+			fctrl->per_frame[i].cmn_attr.count = 0;
+			for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+				fctrl->per_frame[i].led_current_ma[j] = 0;
+		}
+
+		cam_flash_pmic_flush_nrt(fctrl);
+	} else if ((type == FLUSH_REQ) && (req_id != 0)) {
+		/* flush request with req_id*/
+		frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+		fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
+		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
+			false;
+		fctrl->per_frame[frame_offset].cmn_attr.count = 0;
+		for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
+			fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
+	} else if ((type == FLUSH_REQ) && (req_id == 0)) {
+		/* Handles the NonRealTime usecase */
+		cam_flash_pmic_flush_nrt(fctrl);
+	} else {
+		CAM_ERR(CAM_FLASH, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_flash_i2c_flush_request - flush queued I2C flash settings
+ * @fctrl  : flash control structure
+ * @type   : FLUSH_ALL or FLUSH_REQ
+ * @req_id : request to flush (0 means flush non-realtime settings)
+ */
+int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl,
+	enum cam_flash_flush_type type, uint64_t req_id)
+{
+	int rc = 0;
+	int i = 0;
+	uint32_t cancel_req_id_found = 0;
+	struct i2c_settings_array *i2c_set = NULL;
+
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+	if ((type == FLUSH_REQ) && (req_id == 0)) {
+		/* Called only when the NonRealTime settings need cleaning */
+		cam_flash_i2c_flush_nrt(fctrl);
+	} else {
+		/*
+		 * FLUSH_ALL deletes every valid per-frame slot; FLUSH_REQ
+		 * deletes only the slot matching req_id and then stops.
+		 */
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			i2c_set = &(fctrl->i2c_data.per_frame[i]);
+
+			if ((type == FLUSH_REQ) &&
+				(i2c_set->request_id != req_id))
+				continue;
+
+			if (i2c_set->is_settings_valid == 1) {
+				rc = delete_request(i2c_set);
+				if (rc < 0)
+					CAM_ERR(CAM_FLASH,
+						"delete request: %lld rc: %d",
+						i2c_set->request_id, rc);
+
+				if (type == FLUSH_REQ) {
+					cancel_req_id_found = 1;
+					break;
+				}
+			}
+		}
+	}
+
+	if ((type == FLUSH_REQ) && (req_id != 0) &&
+			(!cancel_req_id_found))
+		CAM_DBG(CAM_FLASH,
+			"Flush request id:%lld not found in the pending list",
+			req_id);
+
+	return rc;
+}
+
+/**
+ * cam_flash_flush_request - CRM entry point for flush requests
+ * @flush: flush request from the camera request manager
+ *
+ * Resolves the flash control from the device handle and dispatches to
+ * the transport-specific flush_req under the flash mutex.
+ */
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+{
+	int rc = 0;
+	struct cam_flash_ctrl *fctrl;
+
+	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&fctrl->flash_mutex);
+	switch (flush->type) {
+	case CAM_REQ_MGR_FLUSH_TYPE_ALL:
+		rc = fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "FLUSH_TYPE_ALL failed rc: %d", rc);
+		break;
+	case CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ:
+		rc = fctrl->func_tbl.flush_req(fctrl,
+			FLUSH_REQ, flush->req_id);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "FLUSH_REQ failed rc: %d", rc);
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&fctrl->flash_mutex);
+	return rc;
+}
+
+/**
+ * cam_flash_ops - fire the torch (FIRELOW) or flash (FIREHIGH) LEDs
+ * @flash_ctrl : flash control structure
+ * @flash_data : per-frame LED current settings
+ * @op         : CAMERA_SENSOR_FLASH_OP_FIRELOW or _FIREHIGH
+ *
+ * For each trigger, uses the requested current when it does not exceed
+ * the DT max; otherwise falls back to the DT-configured operational
+ * current (not a clamp to max). Finally raises the switch trigger.
+ */
+static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data, enum camera_flash_opcode op)
+{
+	uint32_t curr = 0, max_current = 0;
+	struct cam_flash_private_soc *soc_private = NULL;
+	int i = 0;
+
+	if (!flash_ctrl || !flash_data) {
+		CAM_ERR(CAM_FLASH, "Fctrl or Data NULL");
+		return -EINVAL;
+	}
+
+	soc_private = (struct cam_flash_private_soc *)
+		flash_ctrl->soc_info.soc_private;
+
+	if (op == CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+		for (i = 0; i < flash_ctrl->torch_num_sources; i++) {
+			if (flash_ctrl->torch_trigger[i]) {
+				max_current = soc_private->torch_max_current[i];
+
+				if (flash_data->led_current_ma[i] <=
+					max_current)
+					curr = flash_data->led_current_ma[i];
+				else
+					curr = soc_private->torch_op_current[i];
+
+				CAM_DBG(CAM_PERF,
+					"Led_Current[%d] = %d", i, curr);
+				cam_res_mgr_led_trigger_event(
+					flash_ctrl->torch_trigger[i],
+					curr);
+			}
+		}
+	} else if (op == CAMERA_SENSOR_FLASH_OP_FIREHIGH) {
+		for (i = 0; i < flash_ctrl->flash_num_sources; i++) {
+			if (flash_ctrl->flash_trigger[i]) {
+				max_current = soc_private->flash_max_current[i];
+
+				if (flash_data->led_current_ma[i] <=
+					max_current)
+					curr = flash_data->led_current_ma[i];
+				else
+					curr = soc_private->flash_op_current[i];
+
+				CAM_DBG(CAM_PERF, "LED flash_current[%d]: %d",
+					i, curr);
+				cam_res_mgr_led_trigger_event(
+					flash_ctrl->flash_trigger[i],
+					curr);
+			}
+		}
+	} else {
+		CAM_ERR(CAM_FLASH, "Wrong Operation: %d", op);
+		return -EINVAL;
+	}
+
+	if (flash_ctrl->switch_trigger)
+		cam_res_mgr_led_trigger_event(
+			flash_ctrl->switch_trigger,
+			LED_SWITCH_ON);
+
+	return 0;
+}
+
+/**
+ * cam_flash_off - switch all flash LEDs off
+ * @flash_ctrl: flash control structure
+ *
+ * Lowers the switch trigger and moves the device back to the START state.
+ */
+int cam_flash_off(struct cam_flash_ctrl *flash_ctrl)
+{
+	if (!flash_ctrl) {
+		CAM_ERR(CAM_FLASH, "Flash control Null");
+		return -EINVAL;
+	}
+
+	if (flash_ctrl->switch_trigger)
+		cam_res_mgr_led_trigger_event(flash_ctrl->switch_trigger,
+			LED_SWITCH_OFF);
+
+	flash_ctrl->flash_state = CAM_FLASH_STATE_START;
+	return 0;
+}
+
+/**
+ * cam_flash_low - fire the torch (low-intensity) LEDs
+ * @flash_ctrl : flash control structure
+ * @flash_data : per-frame LED current settings
+ *
+ * First turns off all high-intensity flash triggers, then fires the
+ * torch via CAMERA_SENSOR_FLASH_OP_FIRELOW.
+ */
+static int cam_flash_low(
+	struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data)
+{
+	int i = 0, rc = 0;
+
+	if (!flash_data) {
+		CAM_ERR(CAM_FLASH, "Flash Data Null");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+		if (flash_ctrl->flash_trigger[i])
+			cam_res_mgr_led_trigger_event(
+				flash_ctrl->flash_trigger[i],
+				LED_OFF);
+
+	rc = cam_flash_ops(flash_ctrl, flash_data,
+		CAMERA_SENSOR_FLASH_OP_FIRELOW);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Fire Torch failed: %d", rc);
+
+	return rc;
+}
+
+/**
+ * cam_flash_high - fire the flash (high-intensity) LEDs
+ * @flash_ctrl : flash control structure
+ * @flash_data : per-frame LED current settings
+ *
+ * First turns off all torch triggers, then fires the flash via
+ * CAMERA_SENSOR_FLASH_OP_FIREHIGH.
+ */
+static int cam_flash_high(
+	struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data)
+{
+	int i = 0, rc = 0;
+
+	if (!flash_data) {
+		CAM_ERR(CAM_FLASH, "Flash Data Null");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+		if (flash_ctrl->torch_trigger[i])
+			cam_res_mgr_led_trigger_event(
+				flash_ctrl->torch_trigger[i],
+				LED_OFF);
+
+	rc = cam_flash_ops(flash_ctrl, flash_data,
+		CAMERA_SENSOR_FLASH_OP_FIREHIGH);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Fire Flash Failed: %d", rc);
+
+	return rc;
+}
+
+/**
+ * cam_flash_i2c_delete_req - retire the stale I2C request behind req_id
+ * @fctrl  : flash control structure
+ * @req_id : newly applied request id (0 flushes non-realtime settings)
+ *
+ * Finds the highest valid request id not exceeding @req_id ("top") and
+ * the one before it (del_req_id), then flushes del_req_id. The modulo
+ * comparison handles ring-buffer wraparound beyond BATCH_SIZE_MAX.
+ */
+static int cam_flash_i2c_delete_req(struct cam_flash_ctrl *fctrl,
+	uint64_t req_id)
+{
+	int i = 0, rc = 0;
+	uint64_t top = 0, del_req_id = 0;
+
+	if (req_id != 0) {
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			if ((req_id >=
+				fctrl->i2c_data.per_frame[i].request_id) &&
+				(top <
+				fctrl->i2c_data.per_frame[i].request_id) &&
+				(fctrl->i2c_data.per_frame[i].is_settings_valid
+					== 1)) {
+				del_req_id = top;
+				top = fctrl->i2c_data.per_frame[i].request_id;
+			}
+		}
+
+		if (top < req_id) {
+			if ((((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) ||
+				(((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX))
+				del_req_id = req_id;
+		}
+
+		if (!del_req_id)
+			return rc;
+
+		CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu",
+			top, del_req_id);
+	}
+	/* req_id == 0 reaches here with del_req_id == 0 (NRT flush) */
+	fctrl->func_tbl.flush_req(fctrl, FLUSH_REQ, del_req_id);
+	return 0;
+}
+
+/*
+ * cam_flash_pmic_delete_req() - Flush stale per-frame PMIC settings.
+ *
+ * PMIC counterpart of cam_flash_i2c_delete_req(): tracks the highest
+ * valid request_id <= @req_id ("top") and the second-highest
+ * (del_req_id), then asks flush_req() to drop everything up to
+ * del_req_id. With @req_id == 0 it flushes with del_req_id == 0.
+ */
+static int cam_flash_pmic_delete_req(struct cam_flash_ctrl *fctrl,
+ uint64_t req_id)
+{
+ int i = 0;
+ struct cam_flash_frame_setting *flash_data = NULL;
+ uint64_t top = 0, del_req_id = 0;
+
+ if (req_id != 0) {
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+ flash_data = &fctrl->per_frame[i];
+ if (req_id >= flash_data->cmn_attr.request_id &&
+ flash_data->cmn_attr.is_settings_valid
+ == 1) {
+ if (top < flash_data->cmn_attr.request_id) {
+ del_req_id = top;
+ top = flash_data->cmn_attr.request_id;
+ } else if (top >
+ flash_data->cmn_attr.request_id &&
+ del_req_id <
+ flash_data->cmn_attr.request_id) {
+ del_req_id =
+ flash_data->cmn_attr.request_id;
+ }
+ }
+ }
+
+ if (top < req_id) {
+ /* NOTE(review): unsigned subtraction — see the same
+ * construct in cam_flash_i2c_delete_req(); confirm
+ * wraparound behavior is intended.
+ */
+ if ((((top % MAX_PER_FRAME_ARRAY) - (req_id %
+ MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) ||
+ (((top % MAX_PER_FRAME_ARRAY) - (req_id %
+ MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX))
+ del_req_id = req_id;
+ }
+
+ if (!del_req_id)
+ return 0;
+
+ CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu",
+ top, del_req_id);
+ }
+
+ fctrl->func_tbl.flush_req(fctrl, FLUSH_REQ, del_req_id);
+ return 0;
+}
+
+/*
+ * cam_flash_slaveInfo_pkt_parser() - Program i2c slave info from a cmd buf.
+ *
+ * Interprets @cmd_buf as a struct cam_cmd_i2c_info and configures the
+ * io_master accordingly: for CCI masters it sets the cci master index,
+ * frequency mode and sid (7-bit address, hence the >> 1); for plain I2C
+ * masters it sets the client address directly.
+ *
+ * Return: 0 on success, -EINVAL for an unknown master type.
+ */
+static int32_t cam_flash_slaveInfo_pkt_parser(struct cam_flash_ctrl *fctrl,
+ uint32_t *cmd_buf)
+{
+ int32_t rc = 0;
+ struct cam_cmd_i2c_info *i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+
+ if (fctrl->io_master_info.master_type == CCI_MASTER) {
+ fctrl->io_master_info.cci_client->cci_i2c_master =
+ fctrl->cci_i2c_master;
+ fctrl->io_master_info.cci_client->i2c_freq_mode =
+ i2c_info->i2c_freq_mode;
+ /* slave_addr is 8-bit format; CCI sid wants the 7-bit form */
+ fctrl->io_master_info.cci_client->sid =
+ i2c_info->slave_addr >> 1;
+ CAM_DBG(CAM_FLASH, "Slave addr: 0x%x Freq Mode: %d",
+ i2c_info->slave_addr, i2c_info->i2c_freq_mode);
+ } else if (fctrl->io_master_info.master_type == I2C_MASTER) {
+ fctrl->io_master_info.client->addr = i2c_info->slave_addr;
+ CAM_DBG(CAM_FLASH, "Slave addr: 0x%x", i2c_info->slave_addr);
+ } else {
+ CAM_ERR(CAM_FLASH, "Invalid Master type: %d",
+ fctrl->io_master_info.master_type);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * cam_flash_i2c_apply_setting() - Apply i2c register settings for a request.
+ *
+ * @req_id == 0 applies the non-realtime init and config (widget/RER/
+ * init-fire) setting lists; a non-zero @req_id applies the per-frame
+ * settings stored at req_id % MAX_PER_FRAME_ARRAY, but only when that
+ * slot is valid and its request_id matches. Old requests are cleaned
+ * up via cam_flash_i2c_delete_req() on the way out.
+ *
+ * Return: 0 on success, or the first i2c apply error.
+ */
+int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl,
+ uint64_t req_id)
+{
+ struct i2c_settings_list *i2c_list;
+ struct i2c_settings_array *i2c_set = NULL;
+ int frame_offset = 0, rc = 0;
+
+ if (req_id == 0) {
+ /* NonRealTime Init settings*/
+ if (fctrl->i2c_data.init_settings.is_settings_valid == true) {
+ list_for_each_entry(i2c_list,
+ &(fctrl->i2c_data.init_settings.list_head),
+ list) {
+ rc = cam_sensor_util_i2c_apply_setting
+ (&(fctrl->io_master_info), i2c_list);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed to apply init settings: %d",
+ rc);
+ return rc;
+ }
+ }
+ }
+ /* NonRealTime (Widget/RER/INIT_FIRE settings) */
+ if (fctrl->i2c_data.config_settings.is_settings_valid == true) {
+ list_for_each_entry(i2c_list,
+ &(fctrl->i2c_data.config_settings.list_head),
+ list) {
+ rc = cam_sensor_util_i2c_apply_setting
+ (&(fctrl->io_master_info), i2c_list);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed to apply NRT settings: %d", rc);
+ return rc;
+ }
+ }
+ }
+ } else {
+ /* RealTime */
+ frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+ i2c_set = &fctrl->i2c_data.per_frame[frame_offset];
+ if ((i2c_set->is_settings_valid == true) &&
+ (i2c_set->request_id == req_id)) {
+ list_for_each_entry(i2c_list,
+ &(i2c_set->list_head), list) {
+ rc = cam_sensor_util_i2c_apply_setting(
+ &(fctrl->io_master_info), i2c_list);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed to apply settings: %d", rc);
+ return rc;
+ }
+ }
+ }
+ }
+
+ /* Drop settings of requests that are now in the past */
+ cam_flash_i2c_delete_req(fctrl, req_id);
+ return rc;
+}
+
+/*
+ * cam_flash_pmic_apply_setting() - Apply PMIC flash settings for a request.
+ *
+ * @req_id == 0 dispatches the non-realtime operation stored in
+ * fctrl->nrt_info (INIT_FIRE, WIDGET or RER). A non-zero @req_id
+ * applies the realtime per-frame operation at
+ * req_id % MAX_PER_FRAME_ARRAY (FIREHIGH / FIRELOW / OFF / NOP).
+ *
+ * Label semantics: paths that jump to nrt_del_req still run
+ * cam_flash_pmic_delete_req(); apply_setting_err skips the delete and
+ * returns directly.
+ *
+ * NOTE(review): the CAM_DBG calls below print the uint64_t req_id with
+ * "%u" — should be "%llu" per printk-format rules; confirm and fix in
+ * a follow-up.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl,
+ uint64_t req_id)
+{
+ int rc = 0, i = 0;
+ int frame_offset = 0;
+ uint16_t num_iterations;
+ struct cam_flash_frame_setting *flash_data = NULL;
+
+ if (req_id == 0) {
+ if (fctrl->nrt_info.cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE) {
+ flash_data = &fctrl->nrt_info;
+ CAM_DBG(CAM_REQ,
+ "FLASH_INIT_FIRE req_id: %u flash_opcode: %d",
+ req_id, flash_data->opcode);
+
+ if (flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_FIREHIGH) {
+ /* INIT_FIRE is rejected once streaming (START) */
+ if (fctrl->flash_state ==
+ CAM_FLASH_STATE_START) {
+ CAM_WARN(CAM_FLASH,
+ "Wrong state :Prev state: %d",
+ fctrl->flash_state);
+ return -EINVAL;
+ }
+
+ rc = cam_flash_high(fctrl, flash_data);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "FLASH ON failed : %d", rc);
+ }
+ if (flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+ if (fctrl->flash_state ==
+ CAM_FLASH_STATE_START) {
+ CAM_WARN(CAM_FLASH,
+ "Wrong state :Prev state: %d",
+ fctrl->flash_state);
+ return -EINVAL;
+ }
+
+ rc = cam_flash_low(fctrl, flash_data);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "TORCH ON failed : %d", rc);
+ }
+ if (flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_OFF) {
+ rc = cam_flash_off(fctrl);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "LED OFF FAILED: %d",
+ rc);
+ return rc;
+ }
+ }
+ } else if (fctrl->nrt_info.cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) {
+ flash_data = &fctrl->nrt_info;
+ CAM_DBG(CAM_REQ,
+ "FLASH_WIDGET req_id: %u flash_opcode: %d",
+ req_id, flash_data->opcode);
+
+ if (flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+ rc = cam_flash_low(fctrl, flash_data);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Torch ON failed : %d",
+ rc);
+ goto nrt_del_req;
+ }
+ } else if (flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_OFF) {
+ rc = cam_flash_off(fctrl);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "LED off failed: %d",
+ rc);
+ }
+ } else if (fctrl->nrt_info.cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_RER) {
+ flash_data = &fctrl->nrt_info;
+ /* Ensure LED starts from the off state before RER */
+ if (fctrl->flash_state != CAM_FLASH_STATE_START) {
+ rc = cam_flash_off(fctrl);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Flash off failed: %d",
+ rc);
+ goto nrt_del_req;
+ }
+ }
+ CAM_DBG(CAM_REQ, "FLASH_RER req_id: %u", req_id);
+
+ /* Red-eye reduction: pulse the torch on/off with the
+ * configured on/off delays for num_iterations cycles.
+ */
+ num_iterations = flash_data->num_iterations;
+ for (i = 0; i < num_iterations; i++) {
+ /* Turn On Torch */
+ if (fctrl->flash_state ==
+ CAM_FLASH_STATE_START) {
+ rc = cam_flash_low(fctrl, flash_data);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Fire Torch Failed");
+ goto nrt_del_req;
+ }
+
+ usleep_range(
+ flash_data->led_on_delay_ms * 1000,
+ flash_data->led_on_delay_ms * 1000 +
+ 100);
+ }
+ /* Turn Off Torch */
+ rc = cam_flash_off(fctrl);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Flash off failed: %d", rc);
+ continue;
+ }
+ fctrl->flash_state = CAM_FLASH_STATE_START;
+ usleep_range(
+ flash_data->led_off_delay_ms * 1000,
+ flash_data->led_off_delay_ms * 1000 + 100);
+ }
+ }
+ } else {
+ /* Realtime per-frame request */
+ frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+ flash_data = &fctrl->per_frame[frame_offset];
+ CAM_DBG(CAM_REQ, "FLASH_RT req_id: %u flash_opcode: %d",
+ req_id, flash_data->opcode);
+
+ if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_FIREHIGH) &&
+ (flash_data->cmn_attr.is_settings_valid) &&
+ (flash_data->cmn_attr.request_id == req_id)) {
+ /* Turn On Flash */
+ if (fctrl->flash_state == CAM_FLASH_STATE_START) {
+ rc = cam_flash_high(fctrl, flash_data);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Flash ON failed: rc= %d",
+ rc);
+ goto apply_setting_err;
+ }
+ }
+ } else if ((flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_FIRELOW) &&
+ (flash_data->cmn_attr.is_settings_valid) &&
+ (flash_data->cmn_attr.request_id == req_id)) {
+ /* Turn On Torch */
+ if (fctrl->flash_state == CAM_FLASH_STATE_START) {
+ rc = cam_flash_low(fctrl, flash_data);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Torch ON failed: rc= %d",
+ rc);
+ goto apply_setting_err;
+ }
+ }
+ } else if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) &&
+ (flash_data->cmn_attr.is_settings_valid) &&
+ (flash_data->cmn_attr.request_id == req_id)) {
+ rc = cam_flash_off(fctrl);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Flash off failed %d", rc);
+ goto apply_setting_err;
+ }
+ } else if (flash_data->opcode == CAM_PKT_NOP_OPCODE) {
+ CAM_DBG(CAM_FLASH, "NOP Packet");
+ } else {
+ rc = -EINVAL;
+ CAM_ERR(CAM_FLASH, "Invalid opcode: %d req_id: %llu",
+ flash_data->opcode, req_id);
+ goto apply_setting_err;
+ }
+ }
+
+nrt_del_req:
+ cam_flash_pmic_delete_req(fctrl, req_id);
+apply_setting_err:
+ return rc;
+}
+
+/*
+ * cam_flash_i2c_pkt_parser() - Parse a CSL packet for i2c-type flash.
+ *
+ * Copies the cam_config_dev_cmd from userspace, maps the packet buffer,
+ * then dispatches on the packet opcode:
+ *  - INIT: walks the command buffers (slave info, power up/down,
+ *    init register settings), fills vreg params, powers up and applies
+ *    the init settings;
+ *  - SET_OPS: parses per-frame i2c settings into the request slot;
+ *  - NON_REALTIME_SET_OPS: parses config settings and applies at once;
+ *  - NOP: forwards the request to the CRM (unless not linked yet).
+ *
+ * SET_OPS and NOP requests are registered with the request manager via
+ * the crm_cb->add_req callback at update_req_mgr.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg)
+{
+ int rc = 0, i = 0;
+ uintptr_t generic_ptr;
+ uint32_t total_cmd_buf_in_bytes = 0;
+ uint32_t processed_cmd_buf_in_bytes = 0;
+ uint16_t cmd_length_in_bytes = 0;
+ uint32_t *cmd_buf = NULL;
+ uint32_t *offset = NULL;
+ uint32_t frm_offset = 0;
+ size_t len_of_buffer;
+ struct cam_flash_init *flash_init = NULL;
+ struct common_header *cmn_hdr = NULL;
+ struct cam_control *ioctl_ctrl = NULL;
+ struct cam_packet *csl_packet = NULL;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ struct cam_config_dev_cmd config;
+ struct cam_req_mgr_add_request add_req;
+ struct i2c_data_settings *i2c_data = NULL;
+ struct i2c_settings_array *i2c_reg_settings = NULL;
+ struct cam_sensor_power_ctrl_t *power_info = NULL;
+
+ if (!fctrl || !arg) {
+ CAM_ERR(CAM_FLASH, "fctrl/arg is NULL");
+ return -EINVAL;
+ }
+ /* getting CSL Packet */
+ ioctl_ctrl = (struct cam_control *)arg;
+
+ if (copy_from_user((&config), (void __user *) ioctl_ctrl->handle,
+ sizeof(config))) {
+ CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
+ return -EFAULT;
+ }
+
+ rc = cam_mem_get_cpu_buf(config.packet_handle,
+ &generic_ptr, &len_of_buffer);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
+ return rc;
+ }
+
+ /* NOTE(review): this only checks the offset itself; it does not
+ * account for sizeof(struct cam_packet) fitting at that offset —
+ * confirm the packet header cannot run past the buffer end.
+ */
+ if (config.offset > len_of_buffer) {
+ CAM_ERR(CAM_FLASH,
+ "offset is out of bounds: offset: %lld len: %zu",
+ config.offset, len_of_buffer);
+ return -EINVAL;
+ }
+
+ /* Add offset to the flash csl header */
+ csl_packet = (struct cam_packet *)(generic_ptr + config.offset);
+ switch (csl_packet->header.op_code & 0xFFFFFF) {
+ case CAM_FLASH_PACKET_OPCODE_INIT: {
+ /* INIT packet*/
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+ /* Loop through multiple command buffers.
+ * NOTE(review): loop starts at i = 1, skipping cmd buffer 0 —
+ * presumably handled elsewhere; confirm.
+ */
+ for (i = 1; i < csl_packet->num_cmd_buf; i++) {
+ total_cmd_buf_in_bytes = cmd_desc[i].length;
+ processed_cmd_buf_in_bytes = 0;
+ if (!total_cmd_buf_in_bytes)
+ continue;
+ rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+ &generic_ptr, &len_of_buffer);
+ if (rc < 0) {
+ CAM_ERR(CAM_FLASH, "Failed to get cpu buf");
+ return rc;
+ }
+ cmd_buf = (uint32_t *)generic_ptr;
+ if (!cmd_buf) {
+ CAM_ERR(CAM_FLASH, "invalid cmd buf");
+ return -EINVAL;
+ }
+ cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+ cmn_hdr = (struct common_header *)cmd_buf;
+
+ /* Loop through cmd formats in one cmd buffer */
+ CAM_DBG(CAM_FLASH,
+ "command Type: %d,Processed: %d,Total: %d",
+ cmn_hdr->cmd_type, processed_cmd_buf_in_bytes,
+ total_cmd_buf_in_bytes);
+ switch (cmn_hdr->cmd_type) {
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO:
+ flash_init = (struct cam_flash_init *)cmd_buf;
+ fctrl->flash_type = flash_init->flash_type;
+ cmd_length_in_bytes =
+ sizeof(struct cam_flash_init);
+ processed_cmd_buf_in_bytes +=
+ cmd_length_in_bytes;
+ cmd_buf += cmd_length_in_bytes/
+ sizeof(uint32_t);
+ break;
+ case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+ rc = cam_flash_slaveInfo_pkt_parser(
+ fctrl, cmd_buf);
+ if (rc < 0) {
+ CAM_ERR(CAM_FLASH,
+ "Failed parsing slave info: rc: %d",
+ rc);
+ return rc;
+ }
+ cmd_length_in_bytes =
+ sizeof(struct cam_cmd_i2c_info);
+ processed_cmd_buf_in_bytes +=
+ cmd_length_in_bytes;
+ cmd_buf += cmd_length_in_bytes/
+ sizeof(uint32_t);
+ break;
+ case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+ case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+ CAM_DBG(CAM_FLASH,
+ "Received power settings");
+ cmd_length_in_bytes =
+ total_cmd_buf_in_bytes;
+ rc = cam_sensor_update_power_settings(
+ cmd_buf,
+ total_cmd_buf_in_bytes,
+ &fctrl->power_info);
+ processed_cmd_buf_in_bytes +=
+ cmd_length_in_bytes;
+ cmd_buf += cmd_length_in_bytes/
+ sizeof(uint32_t);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed update power settings");
+ return rc;
+ }
+ break;
+ default:
+ /* Anything else is treated as init register
+ * settings and parsed into init_settings.
+ */
+ CAM_DBG(CAM_FLASH,
+ "Received initSettings");
+ i2c_data = &(fctrl->i2c_data);
+ i2c_reg_settings =
+ &fctrl->i2c_data.init_settings;
+
+ i2c_reg_settings->request_id = 0;
+ i2c_reg_settings->is_settings_valid = 1;
+ rc = cam_sensor_i2c_command_parser(
+ &fctrl->io_master_info,
+ i2c_reg_settings,
+ &cmd_desc[i], 1);
+ if (rc < 0) {
+ CAM_ERR(CAM_FLASH,
+ "pkt parsing failed: %d", rc);
+ return rc;
+ }
+ cmd_length_in_bytes =
+ cmd_desc[i].length;
+ processed_cmd_buf_in_bytes +=
+ cmd_length_in_bytes;
+ cmd_buf += cmd_length_in_bytes/
+ sizeof(uint32_t);
+
+ break;
+ }
+ }
+ /* NOTE(review): power_info is the address of an embedded
+ * member, so this NULL check can never fire — dead code.
+ */
+ power_info = &fctrl->power_info;
+ if (!power_info) {
+ CAM_ERR(CAM_FLASH, "Power_info is NULL");
+ return -EINVAL;
+ }
+
+ /* Parse and fill vreg params for power up settings */
+ rc = msm_camera_fill_vreg_params(&fctrl->soc_info,
+ power_info->power_setting,
+ power_info->power_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "failed to fill vreg params for power up rc:%d",
+ rc);
+ return rc;
+ }
+
+ /* Parse and fill vreg params for power down settings*/
+ rc = msm_camera_fill_vreg_params(
+ &fctrl->soc_info,
+ power_info->power_down_setting,
+ power_info->power_down_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "failed to fill vreg params power down rc:%d",
+ rc);
+ return rc;
+ }
+
+ rc = fctrl->func_tbl.power_ops(fctrl, true);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Enable Regulator Failed rc = %d", rc);
+ return rc;
+ }
+
+ rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
+ return rc;
+ }
+
+ fctrl->flash_state = CAM_FLASH_STATE_CONFIG;
+ break;
+ }
+ case CAM_FLASH_PACKET_OPCODE_SET_OPS: {
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+ frm_offset = csl_packet->header.request_id %
+ MAX_PER_FRAME_ARRAY;
+ /* add support for handling i2c_data*/
+ i2c_reg_settings =
+ &fctrl->i2c_data.per_frame[frm_offset];
+ if (i2c_reg_settings->is_settings_valid == true) {
+ /* Slot still occupied: invalidate and only notify CRM */
+ i2c_reg_settings->request_id = 0;
+ i2c_reg_settings->is_settings_valid = false;
+ goto update_req_mgr;
+ }
+ i2c_reg_settings->is_settings_valid = true;
+ i2c_reg_settings->request_id =
+ csl_packet->header.request_id;
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ rc = cam_sensor_i2c_command_parser(
+ &fctrl->io_master_info,
+ i2c_reg_settings, cmd_desc, 1);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed in parsing i2c packets");
+ return rc;
+ }
+ break;
+ }
+ case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+
+ /* add support for handling i2c_data*/
+ i2c_reg_settings = &fctrl->i2c_data.config_settings;
+ if (i2c_reg_settings->is_settings_valid == true) {
+ i2c_reg_settings->request_id = 0;
+ i2c_reg_settings->is_settings_valid = false;
+
+ rc = delete_request(i2c_reg_settings);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed in Deleting the err: %d", rc);
+ return rc;
+ }
+ }
+ i2c_reg_settings->is_settings_valid = true;
+ i2c_reg_settings->request_id =
+ csl_packet->header.request_id;
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ rc = cam_sensor_i2c_command_parser(
+ &fctrl->io_master_info,
+ i2c_reg_settings, cmd_desc, 1);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed in parsing i2c NRT packets");
+ return rc;
+ }
+ /* NRT settings are applied immediately, not per-frame */
+ rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "Apply setting failed: %d", rc);
+ return rc;
+ }
+ case CAM_PKT_NOP_OPCODE: {
+ if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) {
+ CAM_WARN(CAM_FLASH,
+ "Rxed NOP packets without linking");
+ frm_offset = csl_packet->header.request_id %
+ MAX_PER_FRAME_ARRAY;
+ fctrl->i2c_data.per_frame[frm_offset].is_settings_valid
+ = false;
+ return 0;
+ }
+
+ CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
+ csl_packet->header.request_id);
+ goto update_req_mgr;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
+ (csl_packet->header.op_code & 0xFFFFFF));
+ return -EINVAL;
+ }
+update_req_mgr:
+ /* NOTE(review): the mask here is 0xFFFFF (5 F's) while the switch
+ * above uses 0xFFFFFF — confirm whether this narrower mask is
+ * intentional.
+ */
+ if (((csl_packet->header.op_code & 0xFFFFF) ==
+ CAM_PKT_NOP_OPCODE) ||
+ ((csl_packet->header.op_code & 0xFFFFF) ==
+ CAM_FLASH_PACKET_OPCODE_SET_OPS)) {
+ add_req.link_hdl = fctrl->bridge_intf.link_hdl;
+ add_req.req_id = csl_packet->header.request_id;
+ add_req.dev_hdl = fctrl->bridge_intf.device_hdl;
+
+ if ((csl_packet->header.op_code & 0xFFFFF) ==
+ CAM_FLASH_PACKET_OPCODE_SET_OPS)
+ add_req.skip_before_applying = 1;
+ else
+ add_req.skip_before_applying = 0;
+
+ if (fctrl->bridge_intf.crm_cb &&
+ fctrl->bridge_intf.crm_cb->add_req)
+ fctrl->bridge_intf.crm_cb->add_req(&add_req);
+ CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
+ }
+ return rc;
+}
+
+/*
+ * cam_flash_pmic_pkt_parser() - Parse a CSL packet for PMIC-type flash.
+ *
+ * Copies the cam_config_dev_cmd from userspace, maps the packet buffer
+ * and dispatches on opcode:
+ *  - INIT: INIT_INFO (power up, record flash type) or INIT_FIRE
+ *    (store currents in nrt_info and apply immediately);
+ *  - SET_OPS: store a per-frame FIRE operation (currents, opcode);
+ *  - NON_REALTIME_SET_OPS: WIDGET / QUERYCURR / RER handling;
+ *  - NOP: invalidate the per-frame slot and notify CRM.
+ *
+ * SET_OPS and NOP requests are registered with the request manager at
+ * update_req_mgr.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg)
+{
+ int rc = 0, i = 0;
+ uintptr_t generic_ptr;
+ uint32_t *cmd_buf = NULL;
+ uint32_t *offset = NULL;
+ uint32_t frm_offset = 0;
+ size_t len_of_buffer;
+ struct cam_control *ioctl_ctrl = NULL;
+ struct cam_packet *csl_packet = NULL;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ struct common_header *cmn_hdr;
+ struct cam_config_dev_cmd config;
+ struct cam_req_mgr_add_request add_req = {0};
+ struct cam_flash_init *cam_flash_info = NULL;
+ struct cam_flash_set_rer *flash_rer_info = NULL;
+ struct cam_flash_set_on_off *flash_operation_info = NULL;
+ struct cam_flash_query_curr *flash_query_info = NULL;
+ struct cam_flash_frame_setting *flash_data = NULL;
+
+ if (!fctrl || !arg) {
+ CAM_ERR(CAM_FLASH, "fctrl/arg is NULL");
+ return -EINVAL;
+ }
+ /* getting CSL Packet */
+ ioctl_ctrl = (struct cam_control *)arg;
+
+ if (copy_from_user((&config),
+ u64_to_user_ptr(ioctl_ctrl->handle),
+ sizeof(config))) {
+ CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
+ rc = -EFAULT;
+ return rc;
+ }
+
+ rc = cam_mem_get_cpu_buf(config.packet_handle,
+ &generic_ptr, &len_of_buffer);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
+ return rc;
+ }
+
+ if (config.offset > len_of_buffer) {
+ CAM_ERR(CAM_FLASH,
+ "offset is out of bounds: offset: %lld len: %zu",
+ config.offset, len_of_buffer);
+ return -EINVAL;
+ }
+
+ /* Add offset to the flash csl header */
+ csl_packet =
+ (struct cam_packet *)(generic_ptr + (uint32_t)config.offset);
+
+ switch (csl_packet->header.op_code & 0xFFFFFF) {
+ case CAM_FLASH_PACKET_OPCODE_INIT: {
+ /* INIT packet*/
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ /* NOTE(review): rc from cam_mem_get_cpu_buf() is not checked
+ * before dereferencing generic_ptr here — confirm.
+ */
+ rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+ &generic_ptr, &len_of_buffer);
+ cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+ cmd_desc->offset);
+ cam_flash_info = (struct cam_flash_init *)cmd_buf;
+
+ switch (cam_flash_info->cmd_type) {
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO: {
+ CAM_DBG(CAM_FLASH, "INIT_INFO CMD CALLED");
+ fctrl->flash_init_setting.cmn_attr.request_id = 0;
+ fctrl->flash_init_setting.cmn_attr.is_settings_valid =
+ true;
+ fctrl->flash_type = cam_flash_info->flash_type;
+ fctrl->is_regulator_enabled = false;
+ fctrl->nrt_info.cmn_attr.cmd_type =
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO;
+
+ rc = fctrl->func_tbl.power_ops(fctrl, true);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Enable Regulator Failed rc = %d", rc);
+ return rc;
+ }
+
+ fctrl->flash_state =
+ CAM_FLASH_STATE_CONFIG;
+ break;
+ }
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE: {
+ CAM_DBG(CAM_FLASH, "INIT_FIRE Operation");
+
+ flash_operation_info =
+ (struct cam_flash_set_on_off *) cmd_buf;
+ fctrl->nrt_info.cmn_attr.count =
+ flash_operation_info->count;
+ fctrl->nrt_info.cmn_attr.request_id = 0;
+ fctrl->nrt_info.opcode =
+ flash_operation_info->opcode;
+ fctrl->nrt_info.cmn_attr.cmd_type =
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE;
+ for (i = 0;
+ i < flash_operation_info->count; i++)
+ fctrl->nrt_info.led_current_ma[i] =
+ flash_operation_info->led_current_ma[i];
+
+ rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "Apply setting failed: %d",
+ rc);
+
+ fctrl->flash_state = CAM_FLASH_STATE_CONFIG;
+ break;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
+ cam_flash_info->cmd_type);
+ return -EINVAL;
+ }
+ break;
+ }
+ case CAM_FLASH_PACKET_OPCODE_SET_OPS: {
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+ frm_offset = csl_packet->header.request_id %
+ MAX_PER_FRAME_ARRAY;
+ flash_data = &fctrl->per_frame[frm_offset];
+
+ /* Reclaim a still-valid slot before reuse */
+ if (flash_data->cmn_attr.is_settings_valid == true) {
+ flash_data->cmn_attr.request_id = 0;
+ flash_data->cmn_attr.is_settings_valid = false;
+ for (i = 0; i < flash_data->cmn_attr.count; i++)
+ flash_data->led_current_ma[i] = 0;
+ }
+
+ flash_data->cmn_attr.request_id = csl_packet->header.request_id;
+ flash_data->cmn_attr.is_settings_valid = true;
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ /* NOTE(review): rc from cam_mem_get_cpu_buf() unchecked */
+ rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+ &generic_ptr, &len_of_buffer);
+ cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+ cmd_desc->offset);
+
+ if (!cmd_buf)
+ return -EINVAL;
+
+ cmn_hdr = (struct common_header *)cmd_buf;
+
+ switch (cmn_hdr->cmd_type) {
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: {
+ CAM_DBG(CAM_FLASH,
+ "CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE cmd called");
+ if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+ (fctrl->flash_state ==
+ CAM_FLASH_STATE_ACQUIRE)) {
+ CAM_WARN(CAM_FLASH,
+ "Rxed Flash fire ops without linking");
+ flash_data->cmn_attr.is_settings_valid = false;
+ return 0;
+ }
+
+ flash_operation_info =
+ (struct cam_flash_set_on_off *) cmd_buf;
+ if (!flash_operation_info) {
+ CAM_ERR(CAM_FLASH,
+ "flash_operation_info Null");
+ return -EINVAL;
+ }
+
+ flash_data->opcode = flash_operation_info->opcode;
+ flash_data->cmn_attr.count =
+ flash_operation_info->count;
+ for (i = 0; i < flash_operation_info->count; i++)
+ flash_data->led_current_ma[i]
+ = flash_operation_info->led_current_ma[i];
+
+ /* An OFF op must also suppress the following frame */
+ if (flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF)
+ add_req.skip_before_applying |= SKIP_NEXT_FRAME;
+ }
+ break;
+ default:
+ CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
+ cmn_hdr->cmd_type);
+ return -EINVAL;
+ }
+ break;
+ }
+ case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+ fctrl->nrt_info.cmn_attr.is_settings_valid = true;
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ /* NOTE(review): rc from cam_mem_get_cpu_buf() unchecked */
+ rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+ &generic_ptr, &len_of_buffer);
+ cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+ cmd_desc->offset);
+ cmn_hdr = (struct common_header *)cmd_buf;
+
+ switch (cmn_hdr->cmd_type) {
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET: {
+ CAM_DBG(CAM_FLASH, "Widget Flash Operation");
+ flash_operation_info =
+ (struct cam_flash_set_on_off *) cmd_buf;
+ fctrl->nrt_info.cmn_attr.count =
+ flash_operation_info->count;
+ fctrl->nrt_info.cmn_attr.request_id = 0;
+ fctrl->nrt_info.opcode =
+ flash_operation_info->opcode;
+ fctrl->nrt_info.cmn_attr.cmd_type =
+ CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET;
+
+ for (i = 0; i < flash_operation_info->count; i++)
+ fctrl->nrt_info.led_current_ma[i] =
+ flash_operation_info->led_current_ma[i];
+
+ rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "Apply setting failed: %d",
+ rc);
+ return rc;
+ }
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR: {
+ int query_curr_ma = 0;
+
+ flash_query_info =
+ (struct cam_flash_query_curr *)cmd_buf;
+
+ /* Ask the PMIC LED driver for its max current and
+ * report it back through the shared cmd buffer.
+ */
+ rc = qpnp_flash_led_prepare(fctrl->switch_trigger,
+ QUERY_MAX_CURRENT, &query_curr_ma);
+ CAM_DBG(CAM_FLASH, "query_curr_ma = %d",
+ query_curr_ma);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Query current failed with rc=%d", rc);
+ return rc;
+ }
+ flash_query_info->query_current_ma = query_curr_ma;
+ break;
+ }
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_RER: {
+ rc = 0;
+ flash_rer_info = (struct cam_flash_set_rer *)cmd_buf;
+ fctrl->nrt_info.cmn_attr.cmd_type =
+ CAMERA_SENSOR_FLASH_CMD_TYPE_RER;
+ fctrl->nrt_info.opcode = flash_rer_info->opcode;
+ fctrl->nrt_info.cmn_attr.count = flash_rer_info->count;
+ fctrl->nrt_info.cmn_attr.request_id = 0;
+ fctrl->nrt_info.num_iterations =
+ flash_rer_info->num_iteration;
+ fctrl->nrt_info.led_on_delay_ms =
+ flash_rer_info->led_on_delay_ms;
+ fctrl->nrt_info.led_off_delay_ms =
+ flash_rer_info->led_off_delay_ms;
+
+ for (i = 0; i < flash_rer_info->count; i++)
+ fctrl->nrt_info.led_current_ma[i] =
+ flash_rer_info->led_current_ma[i];
+
+
+ rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "apply_setting failed: %d",
+ rc);
+ return rc;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Wrong cmd_type : %d",
+ cmn_hdr->cmd_type);
+ return -EINVAL;
+ }
+ break;
+ }
+ case CAM_PKT_NOP_OPCODE: {
+ frm_offset = csl_packet->header.request_id %
+ MAX_PER_FRAME_ARRAY;
+ if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) {
+ CAM_WARN(CAM_FLASH,
+ "Rxed NOP packets without linking");
+ fctrl->per_frame[frm_offset].cmn_attr.is_settings_valid
+ = false;
+ return 0;
+ }
+
+ fctrl->per_frame[frm_offset].cmn_attr.is_settings_valid = false;
+ fctrl->per_frame[frm_offset].cmn_attr.request_id = 0;
+ fctrl->per_frame[frm_offset].opcode = CAM_PKT_NOP_OPCODE;
+ CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %llu",
+ csl_packet->header.request_id);
+ goto update_req_mgr;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
+ (csl_packet->header.op_code & 0xFFFFFF));
+ return -EINVAL;
+ }
+update_req_mgr:
+ /* NOTE(review): mask is 0xFFFFF here vs 0xFFFFFF in the switch
+ * above — confirm whether intentional.
+ */
+ if (((csl_packet->header.op_code & 0xFFFFF) ==
+ CAM_PKT_NOP_OPCODE) ||
+ ((csl_packet->header.op_code & 0xFFFFF) ==
+ CAM_FLASH_PACKET_OPCODE_SET_OPS)) {
+ add_req.link_hdl = fctrl->bridge_intf.link_hdl;
+ add_req.req_id = csl_packet->header.request_id;
+ add_req.dev_hdl = fctrl->bridge_intf.device_hdl;
+
+ if ((csl_packet->header.op_code & 0xFFFFF) ==
+ CAM_FLASH_PACKET_OPCODE_SET_OPS)
+ add_req.skip_before_applying |= 1;
+ else
+ add_req.skip_before_applying = 0;
+
+ if (fctrl->bridge_intf.crm_cb &&
+ fctrl->bridge_intf.crm_cb->add_req)
+ fctrl->bridge_intf.crm_cb->add_req(&add_req);
+ CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
+ }
+
+ return rc;
+}
+
+/*
+ * cam_flash_publish_dev_info() - Report this device to the CRM.
+ *
+ * Fills in device id, name, pipeline delay and SOF trigger point.
+ * Always returns 0.
+ */
+int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+ info->dev_id = CAM_REQ_MGR_DEVICE_FLASH;
+ strlcpy(info->name, CAM_FLASH_NAME, sizeof(info->name));
+ info->p_delay = CAM_FLASH_PIPELINE_DELAY;
+ info->trigger = CAM_TRIGGER_POINT_SOF;
+ return 0;
+}
+
+/*
+ * cam_flash_establish_link() - CRM link/unlink callback.
+ *
+ * On link_enable, records the link handle and CRM callback table under
+ * flash_mutex; otherwise resets them (link_hdl = -1, crm_cb = NULL).
+ *
+ * Return: 0 on success, -EINVAL on NULL @link or missing device data.
+ */
+int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
+{
+ struct cam_flash_ctrl *fctrl = NULL;
+
+ if (!link)
+ return -EINVAL;
+
+ fctrl = (struct cam_flash_ctrl *)cam_get_device_priv(link->dev_hdl);
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, " Device data is NULL");
+ return -EINVAL;
+ }
+ mutex_lock(&fctrl->flash_mutex);
+ if (link->link_enable) {
+ fctrl->bridge_intf.link_hdl = link->link_hdl;
+ fctrl->bridge_intf.crm_cb = link->crm_cb;
+ } else {
+ fctrl->bridge_intf.link_hdl = -1;
+ fctrl->bridge_intf.crm_cb = NULL;
+ }
+ mutex_unlock(&fctrl->flash_mutex);
+
+ return 0;
+}
+
+/*
+ * cam_flash_release_dev() - Destroy the bridge device handle.
+ *
+ * Destroys the device handle (if one is held) and resets all bridge
+ * handles to the "unset" sentinel -1, matching the sentinel used by
+ * the acquire path and cam_flash_establish_link().
+ *
+ * Return: 0 on success, or the cam_destroy_device_hdl() error code.
+ */
+int cam_flash_release_dev(struct cam_flash_ctrl *fctrl)
+{
+ int rc = 0;
+
+ /* Fix: compare against the -1 "unset" sentinel (the original
+ * "!= 1" typo would try to destroy an already-invalid handle
+ * and skip destroying a valid handle that happened to equal 1).
+ */
+ if (fctrl->bridge_intf.device_hdl != -1) {
+ rc = cam_destroy_device_hdl(fctrl->bridge_intf.device_hdl);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "Failed in destroying device handle rc = %d",
+ rc);
+ fctrl->bridge_intf.device_hdl = -1;
+ fctrl->bridge_intf.link_hdl = -1;
+ fctrl->bridge_intf.session_hdl = -1;
+ }
+
+ return rc;
+}
+
+/*
+ * cam_flash_shutdown() - Force the flash device back to the INIT state.
+ *
+ * No-op if already in INIT. From CONFIG or START it flushes all
+ * pending requests and powers the device down, then releases the
+ * bridge device handle and marks the state INIT. Errors are logged
+ * but not propagated (void return).
+ */
+void cam_flash_shutdown(struct cam_flash_ctrl *fctrl)
+{
+ int rc;
+
+ if (fctrl->flash_state == CAM_FLASH_STATE_INIT)
+ return;
+
+ if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_START)) {
+ fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+ rc = fctrl->func_tbl.power_ops(fctrl, false);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "Power Down Failed rc: %d",
+ rc);
+ }
+
+ rc = cam_flash_release_dev(fctrl);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "Release failed rc: %d", rc);
+
+ fctrl->flash_state = CAM_FLASH_STATE_INIT;
+}
+
+/*
+ * cam_flash_apply_request() - CRM apply callback.
+ *
+ * Resolves the flash controller from the device handle and forwards
+ * the request id to the driver-specific apply_setting op under
+ * flash_mutex.
+ *
+ * Return: 0 on success, -EINVAL on NULL input/device data, or the
+ * apply_setting error code.
+ */
+int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+ int rc = 0;
+ struct cam_flash_ctrl *fctrl = NULL;
+
+ if (!apply)
+ return -EINVAL;
+
+ fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(apply->dev_hdl);
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Device data is NULL");
+ return -EINVAL;
+ }
+
+ mutex_lock(&fctrl->flash_mutex);
+ rc = fctrl->func_tbl.apply_setting(fctrl, apply->request_id);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "apply_setting failed with rc=%d",
+ rc);
+ mutex_unlock(&fctrl->flash_mutex);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_core.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_core.h
new file mode 100644
index 000000000000..1bd3b31c1668
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_core.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FLASH_CORE_H_
+#define _CAM_FLASH_CORE_H_
+
+#include <linux/leds-qpnp-flash.h>
+#include <media/cam_sensor.h>
+#include "cam_flash_dev.h"
+
+/* Camera Request Manager (CRM) hooks implemented in cam_flash_core.c */
+int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info);
+int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link);
+int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply);
+int cam_flash_process_evt(struct cam_req_mgr_link_evt_data *event_data);
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush);
+
+
+#endif /*_CAM_FLASH_CORE_H_*/
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_dev.c
new file mode 100644
index 000000000000..f5177d6796b5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -0,0 +1,638 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include "cam_flash_dev.h"
+#include "cam_flash_soc.h"
+#include "cam_flash_core.h"
+#include "cam_common_util.h"
+
+static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
+ void *arg, struct cam_flash_private_soc *soc_private)
+{
+ int rc = 0;
+ int i = 0;
+ struct cam_control *cmd = (struct cam_control *)arg;
+
+ if (!fctrl || !arg) {
+ CAM_ERR(CAM_FLASH, "fctrl/arg is NULL with arg:%pK fctrl%pK",
+ fctrl, arg);
+ return -EINVAL;
+ }
+
+ if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+ CAM_ERR(CAM_FLASH, "Invalid handle type: %d",
+ cmd->handle_type);
+ return -EINVAL;
+ }
+
+ mutex_lock(&(fctrl->flash_mutex));
+ switch (cmd->op_code) {
+ case CAM_ACQUIRE_DEV: {
+ struct cam_sensor_acquire_dev flash_acq_dev;
+ struct cam_create_dev_hdl bridge_params;
+
+ CAM_DBG(CAM_FLASH, "CAM_ACQUIRE_DEV");
+
+ if (fctrl->flash_state != CAM_FLASH_STATE_INIT) {
+ CAM_ERR(CAM_FLASH,
+ "Cannot apply Acquire dev: Prev state: %d",
+ fctrl->flash_state);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+
+ if (fctrl->bridge_intf.device_hdl != -1) {
+ CAM_ERR(CAM_FLASH, "Device is already acquired");
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+
+ rc = copy_from_user(&flash_acq_dev,
+ u64_to_user_ptr(cmd->handle),
+ sizeof(flash_acq_dev));
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Failed Copying from User");
+ goto release_mutex;
+ }
+
+ bridge_params.session_hdl = flash_acq_dev.session_handle;
+ bridge_params.ops = &fctrl->bridge_intf.ops;
+ bridge_params.v4l2_sub_dev_flag = 0;
+ bridge_params.media_entity_flag = 0;
+ bridge_params.priv = fctrl;
+
+ flash_acq_dev.device_handle =
+ cam_create_device_hdl(&bridge_params);
+ fctrl->bridge_intf.device_hdl =
+ flash_acq_dev.device_handle;
+ fctrl->bridge_intf.session_hdl =
+ flash_acq_dev.session_handle;
+
+ rc = copy_to_user(u64_to_user_ptr(cmd->handle),
+ &flash_acq_dev,
+ sizeof(struct cam_sensor_acquire_dev));
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Failed Copy to User with rc = %d",
+ rc);
+ rc = -EFAULT;
+ goto release_mutex;
+ }
+ fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
+ break;
+ }
+ case CAM_RELEASE_DEV: {
+ CAM_DBG(CAM_FLASH, "CAM_RELEASE_DEV");
+ if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_START)) {
+ CAM_WARN(CAM_FLASH,
+ "Wrong state for Release dev: Prev state:%d",
+ fctrl->flash_state);
+ }
+
+ if (fctrl->bridge_intf.device_hdl == -1 &&
+ fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE) {
+ CAM_ERR(CAM_FLASH,
+ "Invalid Handle: Link Hdl: %d device hdl: %d",
+ fctrl->bridge_intf.device_hdl,
+ fctrl->bridge_intf.link_hdl);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+
+ if (fctrl->bridge_intf.link_hdl != -1) {
+ CAM_ERR(CAM_SENSOR,
+ "Device [%d] still active on link 0x%x",
+ fctrl->flash_state,
+ fctrl->bridge_intf.link_hdl);
+ rc = -EAGAIN;
+ goto release_mutex;
+ }
+
+ if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_START))
+ fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+
+ if (cam_flash_release_dev(fctrl))
+ CAM_WARN(CAM_FLASH,
+ "Failed in destroying the device Handle");
+
+ if (fctrl->func_tbl.power_ops(fctrl, false))
+ CAM_WARN(CAM_FLASH, "Power Down Failed");
+
+ fctrl->flash_state = CAM_FLASH_STATE_INIT;
+ break;
+ }
+ case CAM_QUERY_CAP: {
+ struct cam_flash_query_cap_info flash_cap = {0};
+
+ CAM_DBG(CAM_FLASH, "CAM_QUERY_CAP");
+ flash_cap.slot_info = fctrl->soc_info.index;
+ for (i = 0; i < fctrl->flash_num_sources; i++) {
+ flash_cap.max_current_flash[i] =
+ soc_private->flash_max_current[i];
+ flash_cap.max_duration_flash[i] =
+ soc_private->flash_max_duration[i];
+ }
+
+ for (i = 0; i < fctrl->torch_num_sources; i++)
+ flash_cap.max_current_torch[i] =
+ soc_private->torch_max_current[i];
+
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
+ &flash_cap, sizeof(struct cam_flash_query_cap_info))) {
+ CAM_ERR(CAM_FLASH, "Failed Copy to User");
+ rc = -EFAULT;
+ goto release_mutex;
+ }
+ break;
+ }
+ case CAM_START_DEV: {
+ CAM_DBG(CAM_FLASH, "CAM_START_DEV");
+ if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_START)) {
+ CAM_WARN(CAM_FLASH,
+ "Cannot apply Start Dev: Prev state: %d",
+ fctrl->flash_state);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+
+ fctrl->flash_state = CAM_FLASH_STATE_START;
+ break;
+ }
+ case CAM_STOP_DEV: {
+ CAM_DBG(CAM_FLASH, "CAM_STOP_DEV ENTER");
+ if (fctrl->flash_state != CAM_FLASH_STATE_START) {
+ CAM_WARN(CAM_FLASH,
+ "Cannot apply Stop dev: Prev state is: %d",
+ fctrl->flash_state);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+
+ fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+ fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
+ break;
+ }
+ case CAM_CONFIG_DEV: {
+ CAM_DBG(CAM_FLASH, "CAM_CONFIG_DEV");
+ rc = fctrl->func_tbl.parser(fctrl, arg);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Failed Flash Config: rc=%d\n", rc);
+ goto release_mutex;
+ }
+ break;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Invalid Opcode: %d", cmd->op_code);
+ rc = -EINVAL;
+ }
+
+release_mutex:
+ mutex_unlock(&(fctrl->flash_mutex));
+ return rc;
+}
+
+static int32_t cam_flash_init_default_params(struct cam_flash_ctrl *fctrl)
+{
+ /* Validate input parameters */
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "failed: invalid params fctrl %pK",
+ fctrl);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_FLASH,
+ "master_type: %d", fctrl->io_master_info.master_type);
+ /* Initialize cci_client */
+ if (fctrl->io_master_info.master_type == CCI_MASTER) {
+ fctrl->io_master_info.cci_client = kzalloc(sizeof(
+ struct cam_sensor_cci_client), GFP_KERNEL);
+ if (!(fctrl->io_master_info.cci_client))
+ return -ENOMEM;
+ } else if (fctrl->io_master_info.master_type == I2C_MASTER) {
+ if (!(fctrl->io_master_info.client))
+ return -EINVAL;
+ } else {
+ CAM_ERR(CAM_FLASH,
+ "Invalid master / Master type Not supported");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id cam_flash_dt_match[] = {
+ {.compatible = "qcom,camera-flash", .data = NULL},
+ {}
+};
+
+static long cam_flash_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = 0;
+ struct cam_flash_ctrl *fctrl = NULL;
+ struct cam_flash_private_soc *soc_private = NULL;
+
+ CAM_DBG(CAM_FLASH, "Enter");
+
+ fctrl = v4l2_get_subdevdata(sd);
+ soc_private = fctrl->soc_info.soc_private;
+
+ switch (cmd) {
+ case VIDIOC_CAM_CONTROL: {
+ rc = cam_flash_driver_cmd(fctrl, arg,
+ soc_private);
+ break;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Invalid ioctl cmd type");
+ rc = -EINVAL;
+ break;
+ }
+
+ CAM_DBG(CAM_FLASH, "Exit");
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_flash_subdev_do_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cam_control cmd_data;
+ int32_t rc = 0;
+
+ if (copy_from_user(&cmd_data, (void __user *)arg,
+ sizeof(cmd_data))) {
+ CAM_ERR(CAM_FLASH,
+ "Failed to copy from user_ptr=%pK size=%zu",
+ (void __user *)arg, sizeof(cmd_data));
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case VIDIOC_CAM_CONTROL: {
+ rc = cam_flash_subdev_ioctl(sd, cmd, &cmd_data);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "cam_flash_ioctl failed");
+ break;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Invalid compat ioctl cmd_type:%d",
+ cmd);
+ rc = -EINVAL;
+ }
+
+ if (!rc) {
+ if (copy_to_user((void __user *)arg, &cmd_data,
+ sizeof(cmd_data))) {
+ CAM_ERR(CAM_FLASH,
+ "Failed to copy to user_ptr=%pK size=%zu",
+ (void __user *)arg, sizeof(cmd_data));
+ rc = -EFAULT;
+ }
+ }
+
+ return rc;
+}
+#endif
+
+static int cam_flash_platform_remove(struct platform_device *pdev)
+{
+ struct cam_flash_ctrl *fctrl;
+
+ fctrl = platform_get_drvdata(pdev);
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Flash device is NULL");
+ return 0;
+ }
+
+ devm_kfree(&pdev->dev, fctrl);
+
+ return 0;
+}
+
+static int32_t cam_flash_i2c_driver_remove(struct i2c_client *client)
+{
+ int32_t rc = 0;
+ struct cam_flash_ctrl *fctrl = i2c_get_clientdata(client);
+ /* Handle I2C Devices */
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Flash device is NULL");
+ return -EINVAL;
+ }
+ /*Free Allocated Mem */
+ kfree(fctrl->i2c_data.per_frame);
+ fctrl->i2c_data.per_frame = NULL;
+ kfree(fctrl);
+ return rc;
+}
+
+static int cam_flash_subdev_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct cam_flash_ctrl *fctrl =
+ v4l2_get_subdevdata(sd);
+
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Flash ctrl ptr is NULL");
+ return -EINVAL;
+ }
+
+ mutex_lock(&fctrl->flash_mutex);
+ cam_flash_shutdown(fctrl);
+ mutex_unlock(&fctrl->flash_mutex);
+
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops cam_flash_subdev_core_ops = {
+ .ioctl = cam_flash_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = cam_flash_subdev_do_ioctl
+#endif
+};
+
+static struct v4l2_subdev_ops cam_flash_subdev_ops = {
+ .core = &cam_flash_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cam_flash_internal_ops = {
+ .close = cam_flash_subdev_close,
+};
+
+static int cam_flash_init_subdev(struct cam_flash_ctrl *fctrl)
+{
+ int rc = 0;
+
+ fctrl->v4l2_dev_str.internal_ops =
+ &cam_flash_internal_ops;
+ fctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops;
+ fctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME;
+ fctrl->v4l2_dev_str.sd_flags =
+ V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ fctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE;
+ fctrl->v4l2_dev_str.token = fctrl;
+
+ rc = cam_register_subdev(&(fctrl->v4l2_dev_str));
+ if (rc)
+ CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc);
+
+ return rc;
+}
+
+static int32_t cam_flash_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0, i = 0;
+ struct cam_flash_ctrl *fctrl = NULL;
+
+ CAM_DBG(CAM_FLASH, "Enter");
+ if (!pdev->dev.of_node) {
+ CAM_ERR(CAM_FLASH, "of_node NULL");
+ return -EINVAL;
+ }
+
+ fctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL);
+ if (!fctrl)
+ return -ENOMEM;
+
+ fctrl->pdev = pdev;
+ fctrl->soc_info.pdev = pdev;
+ fctrl->soc_info.dev = &pdev->dev;
+ fctrl->soc_info.dev_name = pdev->name;
+
+ platform_set_drvdata(pdev, fctrl);
+
+ rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "cam_flash_get_dt_data failed with %d", rc);
+ kfree(fctrl);
+ return -EINVAL;
+ }
+
+ if (of_find_property(pdev->dev.of_node, "cci-master", NULL)) {
+ /* Get CCI master */
+ rc = of_property_read_u32(pdev->dev.of_node, "cci-master",
+ &fctrl->cci_i2c_master);
+ CAM_DBG(CAM_FLASH, "cci-master %d, rc %d",
+ fctrl->cci_i2c_master, rc);
+ if (rc < 0) {
+ /* Set default master 0 */
+ fctrl->cci_i2c_master = MASTER_0;
+ rc = 0;
+ }
+
+ fctrl->io_master_info.master_type = CCI_MASTER;
+ rc = cam_flash_init_default_params(fctrl);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "failed: cam_flash_init_default_params rc %d",
+ rc);
+ return rc;
+ }
+
+ fctrl->i2c_data.per_frame = (struct i2c_settings_array *)
+ kzalloc(sizeof(struct i2c_settings_array) *
+ MAX_PER_FRAME_ARRAY, GFP_KERNEL);
+ if (fctrl->i2c_data.per_frame == NULL) {
+ CAM_ERR(CAM_FLASH, "No Memory");
+ rc = -ENOMEM;
+ goto free_cci_resource;
+ }
+
+ INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head));
+ INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head));
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+ INIT_LIST_HEAD(
+ &(fctrl->i2c_data.per_frame[i].list_head));
+
+ fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser;
+ fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting;
+ fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops;
+ fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request;
+ } else {
+ /* PMIC Flash */
+ fctrl->func_tbl.parser = cam_flash_pmic_pkt_parser;
+ fctrl->func_tbl.apply_setting = cam_flash_pmic_apply_setting;
+ fctrl->func_tbl.power_ops = cam_flash_pmic_power_ops;
+ fctrl->func_tbl.flush_req = cam_flash_pmic_flush_request;
+ }
+
+ rc = cam_flash_init_subdev(fctrl);
+ if (rc) {
+ if (fctrl->io_master_info.cci_client != NULL)
+ goto free_cci_resource;
+ else
+ goto free_resource;
+ }
+
+ fctrl->bridge_intf.device_hdl = -1;
+ fctrl->bridge_intf.link_hdl = -1;
+ fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+ fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+ fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+ fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
+
+ mutex_init(&(fctrl->flash_mutex));
+
+ fctrl->flash_state = CAM_FLASH_STATE_INIT;
+ CAM_DBG(CAM_FLASH, "Probe success");
+ return rc;
+
+free_cci_resource:
+ kfree(fctrl->io_master_info.cci_client);
+ fctrl->io_master_info.cci_client = NULL;
+free_resource:
+ kfree(fctrl->i2c_data.per_frame);
+ kfree(fctrl->soc_info.soc_private);
+ cam_soc_util_release_platform_resource(&fctrl->soc_info);
+ fctrl->i2c_data.per_frame = NULL;
+ fctrl->soc_info.soc_private = NULL;
+ kfree(fctrl);
+ fctrl = NULL;
+ return rc;
+}
+
+static int32_t cam_flash_i2c_driver_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int32_t rc = 0, i = 0;
+ struct cam_flash_ctrl *fctrl;
+
+ if (client == NULL || id == NULL) {
+ CAM_ERR(CAM_FLASH, "Invalid Args client: %pK id: %pK",
+ client, id);
+ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ CAM_ERR(CAM_FLASH, "%s :: i2c_check_functionality failed",
+ client->name);
+ return -EFAULT;
+ }
+
+ /* Create sensor control structure */
+ fctrl = kzalloc(sizeof(*fctrl), GFP_KERNEL);
+ if (!fctrl)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, fctrl);
+
+ fctrl->io_master_info.client = client;
+ fctrl->soc_info.dev = &client->dev;
+ fctrl->soc_info.dev_name = client->name;
+ fctrl->io_master_info.master_type = I2C_MASTER;
+
+ rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "failed: cam_sensor_parse_dt rc %d", rc);
+ goto free_ctrl;
+ }
+
+ rc = cam_flash_init_subdev(fctrl);
+ if (rc)
+ goto free_ctrl;
+
+ fctrl->i2c_data.per_frame =
+ (struct i2c_settings_array *)
+ kzalloc(sizeof(struct i2c_settings_array) *
+ MAX_PER_FRAME_ARRAY, GFP_KERNEL);
+ if (fctrl->i2c_data.per_frame == NULL) {
+ rc = -ENOMEM;
+ goto unreg_subdev;
+ }
+
+ INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head));
+ INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head));
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+ INIT_LIST_HEAD(&(fctrl->i2c_data.per_frame[i].list_head));
+
+ fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser;
+ fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting;
+ fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops;
+ fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request;
+
+ fctrl->bridge_intf.device_hdl = -1;
+ fctrl->bridge_intf.link_hdl = -1;
+ fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+ fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+ fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+ fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
+
+ mutex_init(&(fctrl->flash_mutex));
+ fctrl->flash_state = CAM_FLASH_STATE_INIT;
+
+ return rc;
+
+unreg_subdev:
+ cam_unregister_subdev(&(fctrl->v4l2_dev_str));
+free_ctrl:
+ kfree(fctrl);
+ fctrl = NULL;
+ return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_flash_dt_match);
+
+static struct platform_driver cam_flash_platform_driver = {
+ .probe = cam_flash_platform_probe,
+ .remove = cam_flash_platform_remove,
+ .driver = {
+ .name = "CAM-FLASH-DRIVER",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_flash_dt_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static const struct i2c_device_id i2c_id[] = {
+ {FLASH_DRIVER_I2C, (kernel_ulong_t)NULL},
+ { }
+};
+
+static struct i2c_driver cam_flash_i2c_driver = {
+ .id_table = i2c_id,
+ .probe = cam_flash_i2c_driver_probe,
+ .remove = cam_flash_i2c_driver_remove,
+ .driver = {
+ .name = FLASH_DRIVER_I2C,
+ },
+};
+
+static int32_t __init cam_flash_init_module(void)
+{
+ int32_t rc = 0;
+
+ rc = platform_driver_register(&cam_flash_platform_driver);
+ if (rc == 0) {
+ CAM_DBG(CAM_FLASH, "platform probe success");
+ return 0;
+ }
+
+ rc = i2c_add_driver(&cam_flash_i2c_driver);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "i2c_add_driver failed rc: %d", rc);
+ return rc;
+}
+
+static void __exit cam_flash_exit_module(void)
+{
+ platform_driver_unregister(&cam_flash_platform_driver);
+ i2c_del_driver(&cam_flash_i2c_driver);
+}
+
+module_init(cam_flash_init_module);
+module_exit(cam_flash_exit_module);
+MODULE_DESCRIPTION("CAM FLASH");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_dev.h
new file mode 100644
index 000000000000..8e5deef871e5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -0,0 +1,226 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _CAM_FLASH_DEV_H_
+#define _CAM_FLASH_DEV_H_
+
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/cam_sensor.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_util.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_subdev.h"
+#include "cam_mem_mgr.h"
+#include "cam_sensor_cmn_header.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+#include "cam_sensor_io.h"
+#include "cam_flash_core.h"
+
+#define CAMX_FLASH_DEV_NAME "cam-flash-dev"
+
+#define CAM_FLASH_PIPELINE_DELAY 1
+
+#define FLASH_DRIVER_I2C "i2c_flash"
+
+#define CAM_FLASH_PACKET_OPCODE_INIT 0
+#define CAM_FLASH_PACKET_OPCODE_SET_OPS 1
+#define CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS 2
+
+struct cam_flash_ctrl;
+
+enum cam_flash_switch_trigger_ops {
+ LED_SWITCH_OFF = 0,
+ LED_SWITCH_ON,
+};
+
+enum cam_flash_state {
+ CAM_FLASH_STATE_INIT,
+ CAM_FLASH_STATE_ACQUIRE,
+ CAM_FLASH_STATE_CONFIG,
+ CAM_FLASH_STATE_START,
+};
+
+enum cam_flash_flush_type {
+ FLUSH_ALL = 0,
+ FLUSH_REQ,
+ FLUSH_MAX,
+};
+
+/**
+ * struct cam_flash_intf_params
+ * @device_hdl : Device Handle
+ * @session_hdl : Session Handle
+ * @link_hdl : Link Handle
+ * @ops : KMD operations
+ * @crm_cb : Callback API pointers
+ */
+struct cam_flash_intf_params {
+ int32_t device_hdl;
+ int32_t session_hdl;
+ int32_t link_hdl;
+ struct cam_req_mgr_kmd_ops ops;
+ struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_flash_common_attr
+ * @is_settings_valid : Notify the valid settings
+ * @request_id : Request id provided by umd
+ * @count : Number of led count
+ * @cmd_type : Command buffer type
+ */
+struct cam_flash_common_attr {
+ bool is_settings_valid;
+ uint64_t request_id;
+ uint16_t count;
+ uint8_t cmd_type;
+};
+
+/**
+ * struct flash_init_packet
+ * @cmn_attr : Provides common attributes
+ * @flash_type : Flash type(PMIC/I2C/GPIO)
+ */
+struct cam_flash_init_packet {
+ struct cam_flash_common_attr cmn_attr;
+ uint8_t flash_type;
+};
+
+/**
+ * struct flash_frame_setting
+ * @cmn_attr : Provides common attributes
+ * @num_iterations : Iterations used to perform RER
+ * @led_on_delay_ms : LED on time in milisec
+ * @led_off_delay_ms : LED off time in milisec
+ * @opcode : Command buffer opcode
+ * @led_current_ma[] : LED current array in miliamps
+ *
+ */
+struct cam_flash_frame_setting {
+ struct cam_flash_common_attr cmn_attr;
+ uint16_t num_iterations;
+ uint16_t led_on_delay_ms;
+ uint16_t led_off_delay_ms;
+ int8_t opcode;
+ uint32_t led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+/**
+ * struct cam_flash_private_soc
+ * @switch_trigger_name : Switch trigger name
+ * @flash_trigger_name : Flash trigger name array
+ * @flash_op_current : Flash operational current
+ * @flash_max_current : Max supported current for LED in flash mode
+ * @flash_max_duration : Max turn on duration for LED in Flash mode
+ * @torch_trigger_name : Torch trigger name array
+ * @torch_op_current : Torch operational current
+ * @torch_max_current : Max supported current for LED in torch mode
+ */
+
+struct cam_flash_private_soc {
+ const char *switch_trigger_name;
+ const char *flash_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t flash_op_current[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t flash_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t flash_max_duration[CAM_FLASH_MAX_LED_TRIGGERS];
+ const char *torch_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t torch_op_current[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t torch_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+struct cam_flash_func_tbl {
+ int (*parser)(struct cam_flash_ctrl *fctrl, void *arg);
+ int (*apply_setting)(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+ int (*power_ops)(struct cam_flash_ctrl *fctrl, bool regulator_enable);
+ int (*flush_req)(struct cam_flash_ctrl *fctrl,
+ enum cam_flash_flush_type type, uint64_t req_id);
+};
+
+/**
+ * struct cam_flash_ctrl
+ * @soc_info : Soc related information
+ * @pdev : Platform device
+ * @per_frame[] : Per_frame setting array
+ * @nrt_info : NonRealTime settings
+ * @of_node : Of Node ptr
+ * @v4l2_dev_str : V4L2 device structure
+ * @bridge_intf : CRM interface
+ * @flash_init_setting : Init command buffer structure
+ * @switch_trigger : Switch trigger ptr
+ * @flash_num_sources : Number of flash sources
+ * @torch_num_source : Number of torch sources
+ * @flash_mutex : Mutex for flash operations
+ * @flash_state : Current flash state (LOW/OFF/ON/INIT)
+ * @flash_type : Flash types (PMIC/I2C/GPIO)
+ * @is_regulator_enable : Regulator disable/enable notifier
+ * @func_tbl : Function table for different HW
+ * (e.g. i2c/pmic/gpio)
+ * @flash_trigger : Flash trigger ptr
+ * @torch_trigger : Torch trigger ptr
+ * @cci_i2c_master : I2C structure
+ * @io_master_info : Information about the communication master
+ * @i2c_data : I2C register settings
+ */
+struct cam_flash_ctrl {
+ struct cam_hw_soc_info soc_info;
+ struct platform_device *pdev;
+ struct cam_sensor_power_ctrl_t power_info;
+ struct cam_flash_frame_setting per_frame[MAX_PER_FRAME_ARRAY];
+ struct cam_flash_frame_setting nrt_info;
+ struct device_node *of_node;
+ struct cam_subdev v4l2_dev_str;
+ struct cam_flash_intf_params bridge_intf;
+ struct cam_flash_init_packet flash_init_setting;
+ struct led_trigger *switch_trigger;
+ uint32_t flash_num_sources;
+ uint32_t torch_num_sources;
+ struct mutex flash_mutex;
+ enum cam_flash_state flash_state;
+ uint8_t flash_type;
+ bool is_regulator_enabled;
+ struct cam_flash_func_tbl func_tbl;
+ struct led_trigger *flash_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+ struct led_trigger *torch_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+/* I2C related setting */
+ enum cci_i2c_master_t cci_i2c_master;
+ struct camera_io_master io_master_info;
+ struct i2c_data_settings i2c_data;
+};
+
+int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg);
+int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg);
+int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+int cam_flash_off(struct cam_flash_ctrl *fctrl);
+int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl,
+ bool regulator_enable);
+int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl,
+ bool regulator_enable);
+int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl,
+ enum cam_flash_flush_type type, uint64_t req_id);
+int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl,
+ enum cam_flash_flush_type, uint64_t req_id);
+void cam_flash_shutdown(struct cam_flash_ctrl *fctrl);
+int cam_flash_release_dev(struct cam_flash_ctrl *fctrl);
+
+#endif /*_CAM_FLASH_DEV_H_*/
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_soc.c
new file mode 100644
index 000000000000..22a124d86f93
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_soc.c
@@ -0,0 +1,224 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include "cam_flash_soc.h"
+#include "cam_res_mgr_api.h"
+
+static int32_t cam_get_source_node_info(
+ struct device_node *of_node,
+ struct cam_flash_ctrl *fctrl,
+ struct cam_flash_private_soc *soc_private)
+{
+ int32_t rc = 0;
+ uint32_t count = 0, i = 0;
+ struct device_node *flash_src_node = NULL;
+ struct device_node *torch_src_node = NULL;
+ struct device_node *switch_src_node = NULL;
+
+ switch_src_node = of_parse_phandle(of_node, "switch-source", 0);
+ if (!switch_src_node) {
+ CAM_DBG(CAM_FLASH, "switch_src_node NULL");
+ } else {
+ rc = of_property_read_string(switch_src_node,
+ "qcom,default-led-trigger",
+ &soc_private->switch_trigger_name);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "default-led-trigger read failed rc=%d", rc);
+ } else {
+ CAM_DBG(CAM_FLASH, "switch trigger %s",
+ soc_private->switch_trigger_name);
+ cam_res_mgr_led_trigger_register(
+ soc_private->switch_trigger_name,
+ &fctrl->switch_trigger);
+ }
+
+ of_node_put(switch_src_node);
+ }
+
+ if (of_get_property(of_node, "flash-source", &count)) {
+ count /= sizeof(uint32_t);
+
+ if (count > CAM_FLASH_MAX_LED_TRIGGERS) {
+ CAM_ERR(CAM_FLASH, "Invalid LED count: %d", count);
+ return -EINVAL;
+ }
+
+ fctrl->flash_num_sources = count;
+
+ for (i = 0; i < count; i++) {
+ flash_src_node = of_parse_phandle(of_node,
+ "flash-source", i);
+ if (!flash_src_node) {
+ CAM_WARN(CAM_FLASH, "flash_src_node NULL");
+ continue;
+ }
+
+ rc = of_property_read_string(flash_src_node,
+ "qcom,default-led-trigger",
+ &soc_private->flash_trigger_name[i]);
+ if (rc) {
+ CAM_WARN(CAM_FLASH,
+ "defalut-led-trigger read failed rc=%d", rc);
+ of_node_put(flash_src_node);
+ continue;
+ }
+
+ CAM_DBG(CAM_FLASH, "default trigger %s",
+ soc_private->flash_trigger_name[i]);
+
+ /* Read operational-current */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,current-ma",
+ &soc_private->flash_op_current[i]);
+ if (rc) {
+ CAM_WARN(CAM_FLASH, "op-current: read failed");
+ of_node_put(flash_src_node);
+ continue;
+ }
+
+ /* Read max-current */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,max-current",
+ &soc_private->flash_max_current[i]);
+ if (rc) {
+ CAM_WARN(CAM_FLASH,
+ "max-current: read failed");
+ of_node_put(flash_src_node);
+ continue;
+ }
+
+ /* Read max-duration */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,duration-ms",
+ &soc_private->flash_max_duration[i]);
+ if (rc)
+ CAM_WARN(CAM_FLASH,
+ "max-duration: read failed");
+
+ of_node_put(flash_src_node);
+
+ CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
+ i, soc_private->flash_max_current[i]);
+
+ cam_res_mgr_led_trigger_register(
+ soc_private->flash_trigger_name[i],
+ &fctrl->flash_trigger[i]);
+ }
+ }
+
+ if (of_get_property(of_node, "torch-source", &count)) {
+ count /= sizeof(uint32_t);
+ if (count > CAM_FLASH_MAX_LED_TRIGGERS) {
+ CAM_ERR(CAM_FLASH, "Invalid LED count : %d", count);
+ return -EINVAL;
+ }
+
+ fctrl->torch_num_sources = count;
+
+ CAM_DBG(CAM_FLASH, "torch_num_sources = %d",
+ fctrl->torch_num_sources);
+ for (i = 0; i < count; i++) {
+ torch_src_node = of_parse_phandle(of_node,
+ "torch-source", i);
+ if (!torch_src_node) {
+ CAM_WARN(CAM_FLASH, "torch_src_node NULL");
+ continue;
+ }
+
+ rc = of_property_read_string(torch_src_node,
+ "qcom,default-led-trigger",
+ &soc_private->torch_trigger_name[i]);
+ if (rc < 0) {
+ CAM_WARN(CAM_FLASH,
+ "default-trigger read failed");
+ of_node_put(torch_src_node);
+ continue;
+ }
+
+ /* Read operational-current */
+ rc = of_property_read_u32(torch_src_node,
+ "qcom,current-ma",
+ &soc_private->torch_op_current[i]);
+ if (rc < 0) {
+ CAM_WARN(CAM_FLASH, "current: read failed");
+ of_node_put(torch_src_node);
+ continue;
+ }
+
+ /* Read max-current */
+ rc = of_property_read_u32(torch_src_node,
+ "qcom,max-current",
+ &soc_private->torch_max_current[i]);
+ if (rc < 0) {
+ CAM_WARN(CAM_FLASH,
+ "max-current: read failed");
+ of_node_put(torch_src_node);
+ continue;
+ }
+
+ of_node_put(torch_src_node);
+
+ CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
+ i, soc_private->torch_max_current[i]);
+
+ cam_res_mgr_led_trigger_register(
+ soc_private->torch_trigger_name[i],
+ &fctrl->torch_trigger[i]);
+ }
+ }
+
+ return rc;
+}
+
+int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
+ struct cam_hw_soc_info *soc_info)
+{
+ int32_t rc = 0;
+ struct device_node *of_node = NULL;
+
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "NULL flash control structure");
+ return -EINVAL;
+ }
+
+ soc_info->soc_private =
+ kzalloc(sizeof(struct cam_flash_private_soc), GFP_KERNEL);
+ if (!soc_info->soc_private) {
+ rc = -ENOMEM;
+ goto release_soc_res;
+ }
+ of_node = fctrl->pdev->dev.of_node;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc);
+ goto free_soc_private;
+ }
+
+ rc = cam_get_source_node_info(of_node, fctrl, soc_info->soc_private);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "cam_flash_get_pmic_source_info failed rc %d", rc);
+ goto free_soc_private;
+ }
+ return rc;
+
+free_soc_private:
+ kfree(soc_info->soc_private);
+ soc_info->soc_private = NULL;
+release_soc_res:
+ cam_soc_util_release_platform_resource(soc_info);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_soc.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_soc.h
new file mode 100644
index 000000000000..7937fd7d40e5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_flash/cam_flash_soc.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FLASH_SOC_H_
+#define _CAM_FLASH_SOC_H_
+
+#include "cam_flash_dev.h"
+
+int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
+ struct cam_hw_soc_info *soc_info);
+
+#endif /*_CAM_FLASH_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/Makefile
new file mode 100644
index 000000000000..18cc2498985a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ois_dev.o cam_ois_core.o cam_ois_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_core.c
new file mode 100644
index 000000000000..b8ebc15e7fd5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -0,0 +1,820 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <cam_sensor_cmn_header.h>
+#include "cam_ois_core.h"
+#include "cam_ois_soc.h"
+#include "cam_sensor_util.h"
+#include "cam_debug_util.h"
+#include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
+
+int32_t cam_ois_construct_default_power_setting(
+ struct cam_sensor_power_ctrl_t *power_info)
+{
+ int rc = 0;
+
+ power_info->power_setting_size = 1;
+ power_info->power_setting =
+ (struct cam_sensor_power_setting *)
+ kzalloc(sizeof(struct cam_sensor_power_setting),
+ GFP_KERNEL);
+ if (!power_info->power_setting)
+ return -ENOMEM;
+
+ power_info->power_setting[0].seq_type = SENSOR_VAF;
+ power_info->power_setting[0].seq_val = CAM_VAF;
+ power_info->power_setting[0].config_val = 1;
+ power_info->power_setting[0].delay = 2;
+
+ power_info->power_down_setting_size = 1;
+ power_info->power_down_setting =
+ (struct cam_sensor_power_setting *)
+ kzalloc(sizeof(struct cam_sensor_power_setting),
+ GFP_KERNEL);
+ if (!power_info->power_down_setting) {
+ rc = -ENOMEM;
+ goto free_power_settings;
+ }
+
+ power_info->power_down_setting[0].seq_type = SENSOR_VAF;
+ power_info->power_down_setting[0].seq_val = CAM_VAF;
+ power_info->power_down_setting[0].config_val = 0;
+
+ return rc;
+
+free_power_settings:
+ kfree(power_info->power_setting);
+ power_info->power_setting = NULL;
+ power_info->power_setting_size = 0;
+ return rc;
+}
+
+
+/**
+ * cam_ois_get_dev_handle - get device handle
+ * @o_ctrl: ctrl structure
+ * @arg: Camera control command argument
+ *
+ * Returns success or failure
+ */
+static int cam_ois_get_dev_handle(struct cam_ois_ctrl_t *o_ctrl,
+ void *arg)
+{
+ struct cam_sensor_acquire_dev ois_acq_dev;
+ struct cam_create_dev_hdl bridge_params;
+ struct cam_control *cmd = (struct cam_control *)arg;
+
+ if (o_ctrl->bridge_intf.device_hdl != -1) {
+ CAM_ERR(CAM_OIS, "Device is already acquired");
+ return -EFAULT;
+ }
+ if (copy_from_user(&ois_acq_dev, u64_to_user_ptr(cmd->handle),
+ sizeof(ois_acq_dev)))
+ return -EFAULT;
+
+ bridge_params.session_hdl = ois_acq_dev.session_handle;
+ bridge_params.ops = &o_ctrl->bridge_intf.ops;
+ bridge_params.v4l2_sub_dev_flag = 0;
+ bridge_params.media_entity_flag = 0;
+ bridge_params.priv = o_ctrl;
+
+ ois_acq_dev.device_handle =
+ cam_create_device_hdl(&bridge_params);
+ o_ctrl->bridge_intf.device_hdl = ois_acq_dev.device_handle;
+ o_ctrl->bridge_intf.session_hdl = ois_acq_dev.session_handle;
+
+ CAM_DBG(CAM_OIS, "Device Handle: %d", ois_acq_dev.device_handle);
+ if (copy_to_user(u64_to_user_ptr(cmd->handle), &ois_acq_dev,
+ sizeof(struct cam_sensor_acquire_dev))) {
+ CAM_ERR(CAM_OIS, "ACQUIRE_DEV: copy to user failed");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int cam_ois_power_up(struct cam_ois_ctrl_t *o_ctrl)
+{
+ int rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &o_ctrl->soc_info;
+ struct cam_ois_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
+
+ soc_private =
+ (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ if ((power_info->power_setting == NULL) &&
+ (power_info->power_down_setting == NULL)) {
+ CAM_INFO(CAM_OIS,
+ "Using default power settings");
+ rc = cam_ois_construct_default_power_setting(power_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS,
+ "Construct default ois power setting failed.");
+ return rc;
+ }
+ }
+
+ /* Parse and fill vreg params for power up settings */
+ rc = msm_camera_fill_vreg_params(
+ soc_info,
+ power_info->power_setting,
+ power_info->power_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_OIS,
+ "failed to fill vreg params for power up rc:%d", rc);
+ return rc;
+ }
+
+ /* Parse and fill vreg params for power down settings*/
+ rc = msm_camera_fill_vreg_params(
+ soc_info,
+ power_info->power_down_setting,
+ power_info->power_down_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_OIS,
+ "failed to fill vreg params for power down rc:%d", rc);
+ return rc;
+ }
+
+ power_info->dev = soc_info->dev;
+
+ rc = cam_sensor_core_power_up(power_info, soc_info);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "failed in ois power up rc %d", rc);
+ return rc;
+ }
+
+ rc = camera_io_init(&o_ctrl->io_master_info);
+ if (rc)
+ CAM_ERR(CAM_OIS, "cci_init failed: rc: %d", rc);
+
+ return rc;
+}
+
+/**
+ * cam_ois_power_down - power down OIS device
+ * @o_ctrl: ctrl structure
+ *
+ * Returns success or failure
+ */
+static int cam_ois_power_down(struct cam_ois_ctrl_t *o_ctrl)
+{
+ int32_t rc = 0;
+ struct cam_sensor_power_ctrl_t *power_info;
+ struct cam_hw_soc_info *soc_info =
+ &o_ctrl->soc_info;
+ struct cam_ois_soc_private *soc_private;
+
+ if (!o_ctrl) {
+ CAM_ERR(CAM_OIS, "failed: o_ctrl %pK", o_ctrl);
+ return -EINVAL;
+ }
+
+	soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_OIS, "failed: soc_private %pK", soc_private);
+		return -EINVAL;
+	}
+	power_info = &soc_private->power_info;
+	soc_info = &o_ctrl->soc_info;
+
+
+ rc = cam_sensor_util_power_down(power_info, soc_info);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "power down the core is failed:%d", rc);
+ return rc;
+ }
+
+ camera_io_release(&o_ctrl->io_master_info);
+
+ return rc;
+}
+
+static int cam_ois_apply_settings(struct cam_ois_ctrl_t *o_ctrl,
+ struct i2c_settings_array *i2c_set)
+{
+ struct i2c_settings_list *i2c_list;
+ int32_t rc = 0;
+ uint32_t i, size;
+
+ if (o_ctrl == NULL || i2c_set == NULL) {
+ CAM_ERR(CAM_OIS, "Invalid Args");
+ return -EINVAL;
+ }
+
+ if (i2c_set->is_settings_valid != 1) {
+ CAM_ERR(CAM_OIS, " Invalid settings");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(i2c_list,
+ &(i2c_set->list_head), list) {
+ if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_RANDOM) {
+ rc = camera_io_dev_write(&(o_ctrl->io_master_info),
+ &(i2c_list->i2c_settings));
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS,
+ "Failed in Applying i2c wrt settings");
+ return rc;
+ }
+ } else if (i2c_list->op_code == CAM_SENSOR_I2C_POLL) {
+ size = i2c_list->i2c_settings.size;
+ for (i = 0; i < size; i++) {
+ rc = camera_io_dev_poll(
+ &(o_ctrl->io_master_info),
+ i2c_list->i2c_settings.reg_setting[i].reg_addr,
+ i2c_list->i2c_settings.reg_setting[i].reg_data,
+ i2c_list->i2c_settings.reg_setting[i].data_mask,
+ i2c_list->i2c_settings.addr_type,
+ i2c_list->i2c_settings.data_type,
+ i2c_list->i2c_settings.reg_setting[i].delay);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS,
+ "i2c poll apply setting Fail");
+ return rc;
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int cam_ois_slaveInfo_pkt_parser(struct cam_ois_ctrl_t *o_ctrl,
+ uint32_t *cmd_buf)
+{
+ int32_t rc = 0;
+ struct cam_cmd_ois_info *ois_info;
+
+ if (!o_ctrl || !cmd_buf) {
+ CAM_ERR(CAM_OIS, "Invalid Args");
+ return -EINVAL;
+ }
+
+ ois_info = (struct cam_cmd_ois_info *)cmd_buf;
+ if (o_ctrl->io_master_info.master_type == CCI_MASTER) {
+ o_ctrl->io_master_info.cci_client->i2c_freq_mode =
+ ois_info->i2c_freq_mode;
+ o_ctrl->io_master_info.cci_client->sid =
+ ois_info->slave_addr >> 1;
+ o_ctrl->ois_fw_flag = ois_info->ois_fw_flag;
+ o_ctrl->is_ois_calib = ois_info->is_ois_calib;
+ memcpy(o_ctrl->ois_name, ois_info->ois_name, 32);
+ o_ctrl->io_master_info.cci_client->retries = 3;
+ o_ctrl->io_master_info.cci_client->id_map = 0;
+ memcpy(&(o_ctrl->opcode), &(ois_info->opcode),
+ sizeof(struct cam_ois_opcode));
+ CAM_DBG(CAM_OIS, "Slave addr: 0x%x Freq Mode: %d",
+ ois_info->slave_addr, ois_info->i2c_freq_mode);
+ } else if (o_ctrl->io_master_info.master_type == I2C_MASTER) {
+ o_ctrl->io_master_info.client->addr = ois_info->slave_addr;
+ CAM_DBG(CAM_OIS, "Slave addr: 0x%x", ois_info->slave_addr);
+ } else {
+ CAM_ERR(CAM_OIS, "Invalid Master type : %d",
+ o_ctrl->io_master_info.master_type);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int cam_ois_fw_download(struct cam_ois_ctrl_t *o_ctrl)
+{
+	uint32_t total_bytes = 0;
+ uint8_t *ptr = NULL;
+ int32_t rc = 0, cnt;
+ uint32_t fw_size;
+ const struct firmware *fw = NULL;
+ const char *fw_name_prog = NULL;
+ const char *fw_name_coeff = NULL;
+ char name_prog[32] = {0};
+ char name_coeff[32] = {0};
+	struct device *dev = NULL;
+	struct cam_sensor_i2c_reg_setting i2c_reg_setting;
+	struct page *page = NULL;
+
+	if (!o_ctrl) {
+		CAM_ERR(CAM_OIS, "Invalid Args");
+		return -EINVAL;
+	}
+	dev = &(o_ctrl->pdev->dev);
+	snprintf(name_coeff, 32, "%s.coeff", o_ctrl->ois_name);
+
+ snprintf(name_prog, 32, "%s.prog", o_ctrl->ois_name);
+
+ /* cast pointer as const pointer*/
+ fw_name_prog = name_prog;
+ fw_name_coeff = name_coeff;
+
+ /* Load FW */
+ rc = request_firmware(&fw, fw_name_prog, dev);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "Failed to locate %s", fw_name_prog);
+ return rc;
+ }
+
+ total_bytes = fw->size;
+ i2c_reg_setting.addr_type = CAMERA_SENSOR_I2C_TYPE_BYTE;
+ i2c_reg_setting.data_type = CAMERA_SENSOR_I2C_TYPE_BYTE;
+ i2c_reg_setting.size = total_bytes;
+ i2c_reg_setting.delay = 0;
+ fw_size = PAGE_ALIGN(sizeof(struct cam_sensor_i2c_reg_array) *
+ total_bytes) >> PAGE_SHIFT;
+ page = cma_alloc(dev_get_cma_area((o_ctrl->soc_info.dev)),
+ fw_size, 0);
+ if (!page) {
+ CAM_ERR(CAM_OIS, "Failed in allocating i2c_array");
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ i2c_reg_setting.reg_setting = (struct cam_sensor_i2c_reg_array *) (
+ page_address(page));
+
+ for (cnt = 0, ptr = (uint8_t *)fw->data; cnt < total_bytes;
+ cnt++, ptr++) {
+ i2c_reg_setting.reg_setting[cnt].reg_addr =
+ o_ctrl->opcode.prog;
+ i2c_reg_setting.reg_setting[cnt].reg_data = *ptr;
+ i2c_reg_setting.reg_setting[cnt].delay = 0;
+ i2c_reg_setting.reg_setting[cnt].data_mask = 0;
+ }
+
+ rc = camera_io_dev_write_continuous(&(o_ctrl->io_master_info),
+ &i2c_reg_setting, 1);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS, "OIS FW download failed %d", rc);
+ goto release_firmware;
+ }
+ cma_release(dev_get_cma_area((o_ctrl->soc_info.dev)),
+ page, fw_size);
+ page = NULL;
+ fw_size = 0;
+ release_firmware(fw);
+
+ rc = request_firmware(&fw, fw_name_coeff, dev);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "Failed to locate %s", fw_name_coeff);
+ return rc;
+ }
+
+ total_bytes = fw->size;
+ i2c_reg_setting.addr_type = CAMERA_SENSOR_I2C_TYPE_BYTE;
+ i2c_reg_setting.data_type = CAMERA_SENSOR_I2C_TYPE_BYTE;
+ i2c_reg_setting.size = total_bytes;
+ i2c_reg_setting.delay = 0;
+ fw_size = PAGE_ALIGN(sizeof(struct cam_sensor_i2c_reg_array) *
+ total_bytes) >> PAGE_SHIFT;
+ page = cma_alloc(dev_get_cma_area((o_ctrl->soc_info.dev)),
+ fw_size, 0);
+ if (!page) {
+ CAM_ERR(CAM_OIS, "Failed in allocating i2c_array");
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ i2c_reg_setting.reg_setting = (struct cam_sensor_i2c_reg_array *) (
+ page_address(page));
+
+ for (cnt = 0, ptr = (uint8_t *)fw->data; cnt < total_bytes;
+ cnt++, ptr++) {
+ i2c_reg_setting.reg_setting[cnt].reg_addr =
+ o_ctrl->opcode.coeff;
+ i2c_reg_setting.reg_setting[cnt].reg_data = *ptr;
+ i2c_reg_setting.reg_setting[cnt].delay = 0;
+ i2c_reg_setting.reg_setting[cnt].data_mask = 0;
+ }
+
+ rc = camera_io_dev_write_continuous(&(o_ctrl->io_master_info),
+ &i2c_reg_setting, 1);
+ if (rc < 0)
+ CAM_ERR(CAM_OIS, "OIS FW download failed %d", rc);
+
+release_firmware:
+ cma_release(dev_get_cma_area((o_ctrl->soc_info.dev)),
+ page, fw_size);
+ release_firmware(fw);
+
+ return rc;
+}
+
+/**
+ * cam_ois_pkt_parse - Parse csl packet
+ * @o_ctrl: ctrl structure
+ * @arg: Camera control command argument
+ *
+ * Returns success or failure
+ */
+static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
+{
+ int32_t rc = 0;
+ int32_t i = 0;
+ uint32_t total_cmd_buf_in_bytes = 0;
+ struct common_header *cmm_hdr = NULL;
+ uintptr_t generic_ptr;
+ struct cam_control *ioctl_ctrl = NULL;
+ struct cam_config_dev_cmd dev_config;
+ struct i2c_settings_array *i2c_reg_settings = NULL;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ uintptr_t generic_pkt_addr;
+ size_t pkt_len;
+ struct cam_packet *csl_packet = NULL;
+ size_t len_of_buff = 0;
+ uint32_t *offset = NULL, *cmd_buf;
+ struct cam_ois_soc_private *soc_private =
+ (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+ struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+ ioctl_ctrl = (struct cam_control *)arg;
+ if (copy_from_user(&dev_config,
+ u64_to_user_ptr(ioctl_ctrl->handle),
+ sizeof(dev_config)))
+ return -EFAULT;
+ rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
+ &generic_pkt_addr, &pkt_len);
+ if (rc) {
+ CAM_ERR(CAM_OIS,
+ "error in converting command Handle Error: %d", rc);
+ return rc;
+ }
+
+	if ((sizeof(struct cam_packet) + dev_config.offset) > pkt_len) {
+ CAM_ERR(CAM_OIS,
+ "offset is out of bound: off: %lld len: %zu",
+ dev_config.offset, pkt_len);
+ return -EINVAL;
+ }
+
+ csl_packet = (struct cam_packet *)
+ (generic_pkt_addr + (uint32_t)dev_config.offset);
+ switch (csl_packet->header.op_code & 0xFFFFFF) {
+ case CAM_OIS_PACKET_OPCODE_INIT:
+ offset = (uint32_t *)&csl_packet->payload;
+ offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+ /* Loop through multiple command buffers */
+ for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+ total_cmd_buf_in_bytes = cmd_desc[i].length;
+ if (!total_cmd_buf_in_bytes)
+ continue;
+
+ rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+ &generic_ptr, &len_of_buff);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS, "Failed to get cpu buf");
+ return rc;
+ }
+ cmd_buf = (uint32_t *)generic_ptr;
+ if (!cmd_buf) {
+ CAM_ERR(CAM_OIS, "invalid cmd buf");
+ return -EINVAL;
+ }
+ cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+ cmm_hdr = (struct common_header *)cmd_buf;
+
+ switch (cmm_hdr->cmd_type) {
+ case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+ rc = cam_ois_slaveInfo_pkt_parser(
+ o_ctrl, cmd_buf);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS,
+ "Failed in parsing slave info");
+ return rc;
+ }
+ break;
+ case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+ case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+ CAM_DBG(CAM_OIS,
+ "Received power settings buffer");
+ rc = cam_sensor_update_power_settings(
+ cmd_buf,
+ total_cmd_buf_in_bytes,
+ power_info);
+ if (rc) {
+ CAM_ERR(CAM_OIS,
+ "Failed: parse power settings");
+ return rc;
+ }
+ break;
+ default:
+ if (o_ctrl->i2c_init_data.is_settings_valid == 0) {
+ CAM_DBG(CAM_OIS,
+ "Received init settings");
+ i2c_reg_settings =
+ &(o_ctrl->i2c_init_data);
+ i2c_reg_settings->is_settings_valid = 1;
+ i2c_reg_settings->request_id = 0;
+ rc = cam_sensor_i2c_command_parser(
+ &o_ctrl->io_master_info,
+ i2c_reg_settings,
+ &cmd_desc[i], 1);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS,
+ "init parsing failed: %d", rc);
+ return rc;
+ }
+ } else if ((o_ctrl->is_ois_calib != 0) &&
+ (o_ctrl->i2c_calib_data.is_settings_valid ==
+ 0)) {
+ CAM_DBG(CAM_OIS,
+ "Received calib settings");
+ i2c_reg_settings = &(o_ctrl->i2c_calib_data);
+ i2c_reg_settings->is_settings_valid = 1;
+ i2c_reg_settings->request_id = 0;
+ rc = cam_sensor_i2c_command_parser(
+ &o_ctrl->io_master_info,
+ i2c_reg_settings,
+ &cmd_desc[i], 1);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS,
+ "Calib parsing failed: %d", rc);
+ return rc;
+ }
+ }
+ break;
+ }
+ }
+
+ if (o_ctrl->cam_ois_state != CAM_OIS_CONFIG) {
+ rc = cam_ois_power_up(o_ctrl);
+ if (rc) {
+ CAM_ERR(CAM_OIS, " OIS Power up failed");
+ return rc;
+ }
+ o_ctrl->cam_ois_state = CAM_OIS_CONFIG;
+ }
+
+ if (o_ctrl->ois_fw_flag) {
+ rc = cam_ois_fw_download(o_ctrl);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "Failed OIS FW Download");
+ goto pwr_dwn;
+ }
+ }
+
+ rc = cam_ois_apply_settings(o_ctrl, &o_ctrl->i2c_init_data);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS, "Cannot apply Init settings");
+ goto pwr_dwn;
+ }
+
+ if (o_ctrl->is_ois_calib) {
+ rc = cam_ois_apply_settings(o_ctrl,
+ &o_ctrl->i2c_calib_data);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "Cannot apply calib data");
+ goto pwr_dwn;
+ }
+ }
+
+ rc = delete_request(&o_ctrl->i2c_init_data);
+ if (rc < 0) {
+ CAM_WARN(CAM_OIS,
+ "Fail deleting Init data: rc: %d", rc);
+ rc = 0;
+ }
+ rc = delete_request(&o_ctrl->i2c_calib_data);
+ if (rc < 0) {
+ CAM_WARN(CAM_OIS,
+ "Fail deleting Calibration data: rc: %d", rc);
+ rc = 0;
+ }
+ break;
+ case CAM_OIS_PACKET_OPCODE_OIS_CONTROL:
+ if (o_ctrl->cam_ois_state < CAM_OIS_CONFIG) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_OIS,
+ "Not in right state to control OIS: %d",
+ o_ctrl->cam_ois_state);
+ return rc;
+ }
+ offset = (uint32_t *)&csl_packet->payload;
+ offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ i2c_reg_settings = &(o_ctrl->i2c_mode_data);
+ i2c_reg_settings->is_settings_valid = 1;
+ i2c_reg_settings->request_id = 0;
+ rc = cam_sensor_i2c_command_parser(&o_ctrl->io_master_info,
+ i2c_reg_settings,
+ cmd_desc, 1);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS, "OIS pkt parsing failed: %d", rc);
+ return rc;
+ }
+
+ rc = cam_ois_apply_settings(o_ctrl, i2c_reg_settings);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS, "Cannot apply mode settings");
+ return rc;
+ }
+
+ rc = delete_request(i2c_reg_settings);
+ if (rc < 0)
+ CAM_ERR(CAM_OIS,
+ "Fail deleting Mode data: rc: %d", rc);
+ break;
+ default:
+ break;
+ }
+ return rc;
+pwr_dwn:
+ cam_ois_power_down(o_ctrl);
+ return rc;
+}
+
+void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl)
+{
+ int rc = 0;
+ struct cam_ois_soc_private *soc_private =
+ (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+ struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+ if (o_ctrl->cam_ois_state == CAM_OIS_INIT)
+ return;
+
+ if (o_ctrl->cam_ois_state >= CAM_OIS_CONFIG) {
+ rc = cam_ois_power_down(o_ctrl);
+ if (rc < 0)
+ CAM_ERR(CAM_OIS, "OIS Power down failed");
+ o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
+ }
+
+ if (o_ctrl->cam_ois_state >= CAM_OIS_ACQUIRE) {
+ rc = cam_destroy_device_hdl(o_ctrl->bridge_intf.device_hdl);
+ if (rc < 0)
+ CAM_ERR(CAM_OIS, "destroying the device hdl");
+ o_ctrl->bridge_intf.device_hdl = -1;
+ o_ctrl->bridge_intf.link_hdl = -1;
+ o_ctrl->bridge_intf.session_hdl = -1;
+ }
+
+ if (o_ctrl->i2c_mode_data.is_settings_valid == 1)
+ delete_request(&o_ctrl->i2c_mode_data);
+
+ if (o_ctrl->i2c_calib_data.is_settings_valid == 1)
+ delete_request(&o_ctrl->i2c_calib_data);
+
+ if (o_ctrl->i2c_init_data.is_settings_valid == 1)
+ delete_request(&o_ctrl->i2c_init_data);
+
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
+ power_info->power_down_setting_size = 0;
+ power_info->power_setting_size = 0;
+
+ o_ctrl->cam_ois_state = CAM_OIS_INIT;
+}
+
+/**
+ * cam_ois_driver_cmd - Handle ois cmds
+ * @e_ctrl: ctrl structure
+ * @arg: Camera control command argument
+ *
+ * Returns success or failure
+ */
+int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg)
+{
+ int rc = 0;
+ struct cam_ois_query_cap_t ois_cap = {0};
+ struct cam_control *cmd = (struct cam_control *)arg;
+ struct cam_ois_soc_private *soc_private = NULL;
+ struct cam_sensor_power_ctrl_t *power_info = NULL;
+
+ if (!o_ctrl || !cmd) {
+ CAM_ERR(CAM_OIS, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+ CAM_ERR(CAM_OIS, "Invalid handle type: %d",
+ cmd->handle_type);
+ return -EINVAL;
+ }
+
+ soc_private =
+ (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ mutex_lock(&(o_ctrl->ois_mutex));
+ switch (cmd->op_code) {
+ case CAM_QUERY_CAP:
+ ois_cap.slot_info = o_ctrl->soc_info.index;
+
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
+ &ois_cap,
+ sizeof(struct cam_ois_query_cap_t))) {
+ CAM_ERR(CAM_OIS, "Failed Copy to User");
+ rc = -EFAULT;
+ goto release_mutex;
+ }
+ CAM_DBG(CAM_OIS, "ois_cap: ID: %d", ois_cap.slot_info);
+ break;
+ case CAM_ACQUIRE_DEV:
+ rc = cam_ois_get_dev_handle(o_ctrl, arg);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "Failed to acquire dev");
+ goto release_mutex;
+ }
+
+ o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
+ break;
+ case CAM_START_DEV:
+ if (o_ctrl->cam_ois_state != CAM_OIS_CONFIG) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_OIS,
+ "Not in right state for start : %d",
+ o_ctrl->cam_ois_state);
+ goto release_mutex;
+ }
+ o_ctrl->cam_ois_state = CAM_OIS_START;
+ break;
+ case CAM_CONFIG_DEV:
+ rc = cam_ois_pkt_parse(o_ctrl, arg);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "Failed in ois pkt Parsing");
+ goto release_mutex;
+ }
+ break;
+ case CAM_RELEASE_DEV:
+ if (o_ctrl->cam_ois_state == CAM_OIS_START) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_OIS,
+ "Cant release ois: in start state");
+ goto release_mutex;
+ }
+
+ if (o_ctrl->cam_ois_state == CAM_OIS_CONFIG) {
+ rc = cam_ois_power_down(o_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS, "OIS Power down failed");
+ goto release_mutex;
+ }
+ }
+
+ if (o_ctrl->bridge_intf.device_hdl == -1) {
+ CAM_ERR(CAM_OIS, "link hdl: %d device hdl: %d",
+ o_ctrl->bridge_intf.device_hdl,
+ o_ctrl->bridge_intf.link_hdl);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+ rc = cam_destroy_device_hdl(o_ctrl->bridge_intf.device_hdl);
+ if (rc < 0)
+ CAM_ERR(CAM_OIS, "destroying the device hdl");
+ o_ctrl->bridge_intf.device_hdl = -1;
+ o_ctrl->bridge_intf.link_hdl = -1;
+ o_ctrl->bridge_intf.session_hdl = -1;
+ o_ctrl->cam_ois_state = CAM_OIS_INIT;
+
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
+ power_info->power_down_setting_size = 0;
+ power_info->power_setting_size = 0;
+
+ if (o_ctrl->i2c_mode_data.is_settings_valid == 1)
+ delete_request(&o_ctrl->i2c_mode_data);
+
+ if (o_ctrl->i2c_calib_data.is_settings_valid == 1)
+ delete_request(&o_ctrl->i2c_calib_data);
+
+ if (o_ctrl->i2c_init_data.is_settings_valid == 1)
+ delete_request(&o_ctrl->i2c_init_data);
+
+ break;
+ case CAM_STOP_DEV:
+ if (o_ctrl->cam_ois_state != CAM_OIS_START) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_OIS,
+ "Not in right state for stop : %d",
+ o_ctrl->cam_ois_state);
+ }
+ o_ctrl->cam_ois_state = CAM_OIS_CONFIG;
+ break;
+ default:
+ CAM_ERR(CAM_OIS, "invalid opcode");
+ goto release_mutex;
+ }
+release_mutex:
+ mutex_unlock(&(o_ctrl->ois_mutex));
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_core.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_core.h
new file mode 100644
index 000000000000..d6f0ec564508
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_core.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_OIS_CORE_H_
+#define _CAM_OIS_CORE_H_
+
+#include <linux/cma.h>
+#include <linux/dma-contiguous.h>
+#include "cam_ois_dev.h"
+
+/**
+ * @power_info: power setting info to control the power
+ *
+ * This API construct the default ois power setting.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int32_t cam_ois_construct_default_power_setting(
+ struct cam_sensor_power_ctrl_t *power_info);
+
+
+int cam_ois_driver_cmd(struct cam_ois_ctrl_t *e_ctrl, void *arg);
+
+/**
+ * @o_ctrl: OIS ctrl structure
+ *
+ * This API handles the shutdown ioctl/close
+ */
+void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl);
+
+#endif
+/* _CAM_OIS_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_dev.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_dev.c
new file mode 100644
index 000000000000..5d16a4e54d04
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_dev.c
@@ -0,0 +1,423 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_ois_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_ois_soc.h"
+#include "cam_ois_core.h"
+#include "cam_debug_util.h"
+
+static long cam_ois_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = 0;
+ struct cam_ois_ctrl_t *o_ctrl = v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_CAM_CONTROL:
+ rc = cam_ois_driver_cmd(o_ctrl, arg);
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+
+ return rc;
+}
+
+static int cam_ois_subdev_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct cam_ois_ctrl_t *o_ctrl =
+ v4l2_get_subdevdata(sd);
+
+ if (!o_ctrl) {
+ CAM_ERR(CAM_OIS, "o_ctrl ptr is NULL");
+ return -EINVAL;
+ }
+
+ mutex_lock(&(o_ctrl->ois_mutex));
+ cam_ois_shutdown(o_ctrl);
+ mutex_unlock(&(o_ctrl->ois_mutex));
+
+ return 0;
+}
+
+static int32_t cam_ois_update_i2c_info(struct cam_ois_ctrl_t *o_ctrl,
+ struct cam_ois_i2c_info_t *i2c_info)
+{
+ struct cam_sensor_cci_client *cci_client = NULL;
+
+ if (o_ctrl->io_master_info.master_type == CCI_MASTER) {
+ cci_client = o_ctrl->io_master_info.cci_client;
+ if (!cci_client) {
+ CAM_ERR(CAM_OIS, "failed: cci_client %pK",
+ cci_client);
+ return -EINVAL;
+ }
+ cci_client->cci_i2c_master = o_ctrl->cci_i2c_master;
+ cci_client->sid = (i2c_info->slave_addr) >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_ois_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cam_control cmd_data;
+ int32_t rc = 0;
+
+ if (copy_from_user(&cmd_data, (void __user *)arg,
+ sizeof(cmd_data))) {
+ CAM_ERR(CAM_OIS,
+ "Failed to copy from user_ptr=%pK size=%zu",
+ (void __user *)arg, sizeof(cmd_data));
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case VIDIOC_CAM_CONTROL:
+ rc = cam_ois_subdev_ioctl(sd, cmd, &cmd_data);
+ if (rc) {
+ CAM_ERR(CAM_OIS,
+ "Failed in ois suddev handling rc %d",
+ rc);
+ return rc;
+ }
+ break;
+ default:
+ CAM_ERR(CAM_OIS, "Invalid compat ioctl: %d", cmd);
+ rc = -EINVAL;
+ }
+
+ if (!rc) {
+ if (copy_to_user((void __user *)arg, &cmd_data,
+ sizeof(cmd_data))) {
+ CAM_ERR(CAM_OIS,
+ "Failed to copy from user_ptr=%pK size=%zu",
+ (void __user *)arg, sizeof(cmd_data));
+ rc = -EFAULT;
+ }
+ }
+ return rc;
+}
+#endif
+
+static const struct v4l2_subdev_internal_ops cam_ois_internal_ops = {
+ .close = cam_ois_subdev_close,
+};
+
+static struct v4l2_subdev_core_ops cam_ois_subdev_core_ops = {
+ .ioctl = cam_ois_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = cam_ois_init_subdev_do_ioctl,
+#endif
+};
+
+static struct v4l2_subdev_ops cam_ois_subdev_ops = {
+ .core = &cam_ois_subdev_core_ops,
+};
+
+static int cam_ois_init_subdev_param(struct cam_ois_ctrl_t *o_ctrl)
+{
+ int rc = 0;
+
+ o_ctrl->v4l2_dev_str.internal_ops = &cam_ois_internal_ops;
+ o_ctrl->v4l2_dev_str.ops = &cam_ois_subdev_ops;
+ strlcpy(o_ctrl->device_name, CAM_OIS_NAME,
+ sizeof(o_ctrl->device_name));
+ o_ctrl->v4l2_dev_str.name = o_ctrl->device_name;
+ o_ctrl->v4l2_dev_str.sd_flags =
+ (V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+ o_ctrl->v4l2_dev_str.ent_function = CAM_OIS_DEVICE_TYPE;
+ o_ctrl->v4l2_dev_str.token = o_ctrl;
+
+ rc = cam_register_subdev(&(o_ctrl->v4l2_dev_str));
+ if (rc)
+ CAM_ERR(CAM_OIS, "fail to create subdev");
+
+ return rc;
+}
+
+static int cam_ois_i2c_driver_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ struct cam_ois_ctrl_t *o_ctrl = NULL;
+ struct cam_ois_soc_private *soc_private = NULL;
+
+ if (client == NULL || id == NULL) {
+ CAM_ERR(CAM_OIS, "Invalid Args client: %pK id: %pK",
+ client, id);
+ return -EINVAL;
+ }
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_OIS, "i2c_check_functionality failed");
+		rc = -EFAULT;
+		goto probe_failure;
+	}
+ o_ctrl = kzalloc(sizeof(*o_ctrl), GFP_KERNEL);
+ if (!o_ctrl) {
+ CAM_ERR(CAM_OIS, "kzalloc failed");
+ rc = -ENOMEM;
+ goto probe_failure;
+ }
+
+ i2c_set_clientdata(client, o_ctrl);
+
+ o_ctrl->soc_info.dev = &client->dev;
+ o_ctrl->soc_info.dev_name = client->name;
+ o_ctrl->ois_device_type = MSM_CAMERA_I2C_DEVICE;
+ o_ctrl->io_master_info.master_type = I2C_MASTER;
+ o_ctrl->io_master_info.client = client;
+
+ soc_private = kzalloc(sizeof(struct cam_ois_soc_private),
+ GFP_KERNEL);
+ if (!soc_private) {
+ rc = -ENOMEM;
+ goto octrl_free;
+ }
+
+ o_ctrl->soc_info.soc_private = soc_private;
+ rc = cam_ois_driver_soc_init(o_ctrl);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "failed: cam_sensor_parse_dt rc %d", rc);
+ goto soc_free;
+ }
+
+ rc = cam_ois_init_subdev_param(o_ctrl);
+ if (rc)
+ goto soc_free;
+
+ o_ctrl->cam_ois_state = CAM_OIS_INIT;
+
+ return rc;
+
+soc_free:
+ kfree(soc_private);
+octrl_free:
+ kfree(o_ctrl);
+probe_failure:
+ return rc;
+}
+
+static int cam_ois_i2c_driver_remove(struct i2c_client *client)
+{
+ int i;
+ struct cam_ois_ctrl_t *o_ctrl = i2c_get_clientdata(client);
+ struct cam_hw_soc_info *soc_info;
+ struct cam_ois_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
+
+ if (!o_ctrl) {
+ CAM_ERR(CAM_OIS, "ois device is NULL");
+ return -EINVAL;
+ }
+
+ soc_info = &o_ctrl->soc_info;
+
+ for (i = 0; i < soc_info->num_clk; i++)
+ devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+ soc_private =
+ (struct cam_ois_soc_private *)soc_info->soc_private;
+ power_info = &soc_private->power_info;
+
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
+ kfree(o_ctrl->soc_info.soc_private);
+ kfree(o_ctrl);
+
+ return 0;
+}
+
+/**
+ * cam_ois_platform_driver_probe - probe an OIS device on the platform bus
+ * @pdev: platform device handed in by the driver core
+ *
+ * Allocates the OIS control structure, its CCI client and soc private
+ * data, parses the device tree, registers the v4l2 subdev and leaves
+ * the device in CAM_OIS_INIT state.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int32_t cam_ois_platform_driver_probe(
+	struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	struct cam_ois_ctrl_t *o_ctrl = NULL;
+	struct cam_ois_soc_private *soc_private = NULL;
+
+	o_ctrl = kzalloc(sizeof(struct cam_ois_ctrl_t), GFP_KERNEL);
+	if (!o_ctrl)
+		return -ENOMEM;
+
+	o_ctrl->soc_info.pdev = pdev;
+	o_ctrl->pdev = pdev;
+	o_ctrl->soc_info.dev = &pdev->dev;
+	o_ctrl->soc_info.dev_name = pdev->name;
+
+	o_ctrl->ois_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+
+	o_ctrl->io_master_info.master_type = CCI_MASTER;
+	o_ctrl->io_master_info.cci_client = kzalloc(
+		sizeof(struct cam_sensor_cci_client), GFP_KERNEL);
+	if (!o_ctrl->io_master_info.cci_client) {
+		/* Bug fix: rc was still 0 here, so probe reported success
+		 * while freeing o_ctrl. Report the allocation failure.
+		 */
+		rc = -ENOMEM;
+		goto free_o_ctrl;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_ois_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto free_cci_client;
+	}
+	o_ctrl->soc_info.soc_private = soc_private;
+	soc_private->power_info.dev = &pdev->dev;
+
+	INIT_LIST_HEAD(&(o_ctrl->i2c_init_data.list_head));
+	INIT_LIST_HEAD(&(o_ctrl->i2c_calib_data.list_head));
+	INIT_LIST_HEAD(&(o_ctrl->i2c_mode_data.list_head));
+	mutex_init(&(o_ctrl->ois_mutex));
+	rc = cam_ois_driver_soc_init(o_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "failed: soc init rc %d", rc);
+		goto free_soc;
+	}
+
+	rc = cam_ois_init_subdev_param(o_ctrl);
+	if (rc)
+		goto free_soc;
+
+	rc = cam_ois_update_i2c_info(o_ctrl, &soc_private->i2c_info);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "failed: to update i2c info rc %d", rc);
+		goto unreg_subdev;
+	}
+	/* no CRM device handle yet; assigned at ACQUIRE time */
+	o_ctrl->bridge_intf.device_hdl = -1;
+
+	platform_set_drvdata(pdev, o_ctrl);
+	v4l2_set_subdevdata(&o_ctrl->v4l2_dev_str.sd, o_ctrl);
+
+	o_ctrl->cam_ois_state = CAM_OIS_INIT;
+
+	return rc;
+unreg_subdev:
+	cam_unregister_subdev(&(o_ctrl->v4l2_dev_str));
+free_soc:
+	kfree(soc_private);
+free_cci_client:
+	kfree(o_ctrl->io_master_info.cci_client);
+free_o_ctrl:
+	kfree(o_ctrl);
+	return rc;
+}
+
+/**
+ * cam_ois_platform_driver_remove - tear down a platform-bus OIS device
+ * @pdev: platform device being removed
+ *
+ * Mirror of the platform probe: releases clocks, power settings, the
+ * soc private data, the CCI client and the control structure.
+ *
+ * Return: 0 on success, -EINVAL if no driver data is attached.
+ */
+static int cam_ois_platform_driver_remove(struct platform_device *pdev)
+{
+	int i;
+	struct cam_ois_ctrl_t *o_ctrl;
+	struct cam_ois_soc_private *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info *soc_info;
+
+	o_ctrl = platform_get_drvdata(pdev);
+	if (!o_ctrl) {
+		CAM_ERR(CAM_OIS, "ois device is NULL");
+		return -EINVAL;
+	}
+
+	soc_info = &o_ctrl->soc_info;
+	/* put back the clock handles obtained via devm_clk_get() */
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	/* kfree(NULL) is a no-op, so these may legitimately be unset */
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	kfree(o_ctrl->soc_info.soc_private);
+	kfree(o_ctrl->io_master_info.cci_client);
+	kfree(o_ctrl);
+	return 0;
+}
+
+/* Device-tree match table for the platform (CCI-attached) OIS device */
+static const struct of_device_id cam_ois_dt_match[] = {
+	{ .compatible = "qcom,ois" },
+	{ }
+};
+
+
+MODULE_DEVICE_TABLE(of, cam_ois_dt_match);
+
+static struct platform_driver cam_ois_platform_driver = {
+	.driver = {
+		.name = "qcom,ois",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ois_dt_match,
+	},
+	.probe = cam_ois_platform_driver_probe,
+	.remove = cam_ois_platform_driver_remove,
+};
+
+/* I2C id table for directly-attached OIS parts */
+static const struct i2c_device_id cam_ois_i2c_id[] = {
+	{ "msm_ois", (kernel_ulong_t)NULL},
+	{ }
+};
+
+static struct i2c_driver cam_ois_i2c_driver = {
+	.id_table = cam_ois_i2c_id,
+	.probe = cam_ois_i2c_driver_probe,
+	.remove = cam_ois_i2c_driver_remove,
+	.driver = {
+		.name = "msm_ois",
+	},
+};
+
+/* Tracks which bus drivers registered successfully, so the exit
+ * handler only unregisters what init actually registered.
+ */
+static struct cam_ois_registered_driver_t registered_driver = {
+	0, 0};
+
+/**
+ * cam_ois_driver_init - module entry point
+ *
+ * Registers the platform and i2c OIS drivers. If the i2c registration
+ * fails, the platform registration is rolled back, since a failed
+ * module_init means the exit handler will never run.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int __init cam_ois_driver_init(void)
+{
+	int rc = 0;
+
+	rc = platform_driver_register(&cam_ois_platform_driver);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "platform_driver_register failed rc = %d",
+			rc);
+		return rc;
+	}
+
+	registered_driver.platform_driver = 1;
+
+	rc = i2c_add_driver(&cam_ois_i2c_driver);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "i2c_add_driver failed rc = %d", rc);
+		/* Bug fix: undo the platform registration, otherwise the
+		 * driver stays registered after module init has failed.
+		 */
+		platform_driver_unregister(&cam_ois_platform_driver);
+		registered_driver.platform_driver = 0;
+		return rc;
+	}
+
+	registered_driver.i2c_driver = 1;
+	return rc;
+}
+
+/* Unregister whichever drivers were successfully registered at init. */
+static void __exit cam_ois_driver_exit(void)
+{
+	if (registered_driver.platform_driver)
+		platform_driver_unregister(&cam_ois_platform_driver);
+
+	if (registered_driver.i2c_driver)
+		i2c_del_driver(&cam_ois_i2c_driver);
+}
+
+module_init(cam_ois_driver_init);
+module_exit(cam_ois_driver_exit);
+MODULE_DESCRIPTION("CAM OIS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_dev.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_dev.h
new file mode 100644
index 000000000000..ea648594ccc0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_dev.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_OIS_DEV_H_
+#define _CAM_OIS_DEV_H_
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cam_sensor.h>
+#include <cam_sensor_i2c.h>
+#include <cam_sensor_spi.h>
+#include <cam_sensor_io.h>
+#include <cam_cci_dev.h>
+#include <cam_req_mgr_util.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_mem_mgr.h>
+#include <cam_subdev.h>
+#include "cam_soc_util.h"
+
+/* Declares a file-scope, statically initialized mutex.
+ * NOTE(review): no user visible in this header — confirm it is still
+ * needed before removing.
+ */
+#define DEFINE_MSM_MUTEX(mutexname) \
+	static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+/* Life-cycle states of the OIS device; probe leaves it in CAM_OIS_INIT. */
+enum cam_ois_state {
+	CAM_OIS_INIT,
+	CAM_OIS_ACQUIRE,
+	CAM_OIS_CONFIG,
+	CAM_OIS_START,
+};
+
+/**
+ * struct cam_ois_registered_driver_t - registered driver info
+ * @platform_driver : flag indicates if platform driver is registered
+ * @i2c_driver : flag indicates if i2c driver is registered
+ *
+ */
+struct cam_ois_registered_driver_t {
+	bool platform_driver;
+	bool i2c_driver;
+};
+
+/**
+ * struct cam_ois_i2c_info_t - I2C info
+ * @slave_addr : slave address
+ * @i2c_freq_mode : i2c frequency mode
+ *
+ */
+struct cam_ois_i2c_info_t {
+	uint16_t slave_addr;
+	uint8_t i2c_freq_mode;
+};
+
+/**
+ * struct cam_ois_soc_private - ois soc private data structure
+ * @ois_name : ois name
+ * @i2c_info : i2c info structure
+ * @power_info : ois power info
+ *
+ */
+struct cam_ois_soc_private {
+	const char *ois_name;
+	struct cam_ois_i2c_info_t i2c_info;
+	struct cam_sensor_power_ctrl_t power_info;
+};
+
+/**
+ * struct cam_ois_intf_params - bridge interface params
+ * @device_hdl : Device Handle (-1 until the device is acquired)
+ * @session_hdl : Session Handle
+ * @link_hdl : Link Handle
+ * @ops : KMD operations
+ * @crm_cb : Callback API pointers
+ */
+struct cam_ois_intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_ois_ctrl_t - OIS ctrl private data
+ * @pdev : platform device
+ * @ois_mutex : ois mutex
+ * @soc_info : ois soc related info
+ * @io_master_info : Information about the communication master
+ * @cci_i2c_master : CCI master index (from the "cci-master" DT property)
+ * @cci_num : CCI device index (from the "cci-device" DT property)
+ * @v4l2_dev_str : V4L2 device structure
+ * @bridge_intf : bridge interface params
+ * @i2c_init_data : ois i2c init settings
+ * @i2c_mode_data : ois i2c mode settings
+ * @i2c_calib_data : ois i2c calib settings
+ * @ois_device_type : ois device type (platform/CCI or direct i2c)
+ * @cam_ois_state : ois_device_state
+ * @ois_name : ois name
+ * @ois_fw_flag : flag for firmware download
+ * @is_ois_calib : flag for Calibration data
+ * @opcode : ois opcode
+ * @device_name : Device name
+ *
+ */
+struct cam_ois_ctrl_t {
+	struct platform_device *pdev;
+	struct mutex ois_mutex;
+	struct cam_hw_soc_info soc_info;
+	struct camera_io_master io_master_info;
+	enum cci_i2c_master_t cci_i2c_master;
+	enum cci_device_num cci_num;
+	struct cam_subdev v4l2_dev_str;
+	struct cam_ois_intf_params bridge_intf;
+	struct i2c_settings_array i2c_init_data;
+	struct i2c_settings_array i2c_calib_data;
+	struct i2c_settings_array i2c_mode_data;
+	enum msm_camera_device_type_t ois_device_type;
+	enum cam_ois_state cam_ois_state;
+	char device_name[20];
+	char ois_name[32];
+	uint8_t ois_fw_flag;
+	uint8_t is_ois_calib;
+	struct cam_ois_opcode opcode;
+};
+
+#endif /*_CAM_OIS_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_soc.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_soc.c
new file mode 100644
index 000000000000..d090ebb432b5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_soc.c
@@ -0,0 +1,129 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+
+#include "cam_ois_soc.h"
+#include "cam_debug_util.h"
+
+/**
+ * cam_ois_get_dt_data - parse the OIS device tree node
+ * @o_ctrl: OIS control structure
+ *
+ * Reads the generic soc properties, the optional GPIO table and the
+ * clock handles for the OIS device.
+ *
+ * Return: 0 on success (including when no GPIOs are configured),
+ * negative errno otherwise.
+ */
+static int cam_ois_get_dt_data(struct cam_ois_ctrl_t *o_ctrl)
+{
+	int i, rc = 0;
+	struct cam_hw_soc_info *soc_info = &o_ctrl->soc_info;
+	struct cam_ois_soc_private *soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+	struct device_node *of_node = NULL;
+
+	of_node = soc_info->dev->of_node;
+
+	if (!of_node) {
+		CAM_ERR(CAM_OIS, "of_node is NULL, device type %d",
+			o_ctrl->ois_device_type);
+		return -EINVAL;
+	}
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_OIS, "cam_soc_util_get_dt_properties rc %d",
+			rc);
+		return rc;
+	}
+
+	/* A device without any GPIOs is valid; bail out successfully */
+	if (!soc_info->gpio_data) {
+		CAM_INFO(CAM_OIS, "No GPIO found");
+		return 0;
+	}
+
+	if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+		CAM_INFO(CAM_OIS, "No GPIO found");
+		return -EINVAL;
+	}
+
+	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+		&power_info->gpio_num_info);
+	if ((rc < 0) || (!power_info->gpio_num_info)) {
+		CAM_ERR(CAM_OIS, "No/Error OIS GPIOs");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = devm_clk_get(soc_info->dev,
+			soc_info->clk_name[i]);
+		/* Bug fix: devm_clk_get() reports failure via ERR_PTR(),
+		 * not NULL, so the previous "!clk" check could never fire.
+		 * Also log with the OIS tag instead of CAM_SENSOR.
+		 */
+		if (IS_ERR(soc_info->clk[i])) {
+			CAM_ERR(CAM_OIS, "get failed for %s",
+				soc_info->clk_name[i]);
+			rc = -ENOENT;
+			return rc;
+		}
+	}
+
+	return rc;
+}
+/**
+ * cam_ois_driver_soc_init - initialize soc-related OIS parameters
+ * @o_ctrl: ctrl structure
+ *
+ * Called from cam_ois_platform/i2c_driver_probe; parses the ois dt
+ * node. For platform (CCI-attached) devices it additionally reads the
+ * mandatory "cci-master" and optional "cci-device" properties.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_ois_driver_soc_init(struct cam_ois_ctrl_t *o_ctrl)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info = &o_ctrl->soc_info;
+	struct device_node *of_node = NULL;
+
+	if (!soc_info->dev) {
+		CAM_ERR(CAM_OIS, "soc_info is not initialized");
+		return -EINVAL;
+	}
+
+	of_node = soc_info->dev->of_node;
+	if (!of_node) {
+		CAM_ERR(CAM_OIS, "dev.of_node NULL");
+		return -EINVAL;
+	}
+
+	if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+		rc = of_property_read_u32(of_node, "cci-master",
+			&o_ctrl->cci_i2c_master);
+		if (rc < 0) {
+			CAM_DBG(CAM_OIS, "failed rc %d", rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node, "cci-device",
+			&o_ctrl->cci_num);
+		/* Fix copy-paste: log with the OIS tag, not CAM_ACTUATOR */
+		CAM_DBG(CAM_OIS, "cci-device %d, rc %d",
+			o_ctrl->cci_num, rc);
+		if (rc < 0) {
+			/* "cci-device" is optional: default to CCI device 0 */
+			o_ctrl->cci_num = CCI_DEVICE_0;
+			rc = 0;
+		}
+		o_ctrl->io_master_info.cci_client->cci_device = o_ctrl->cci_num;
+	}
+
+	rc = cam_ois_get_dt_data(o_ctrl);
+	if (rc < 0)
+		CAM_DBG(CAM_OIS, "failed: ois get dt data rc %d", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_soc.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_soc.h
new file mode 100644
index 000000000000..aaf21a01a3a9
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_ois/cam_ois_soc.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_OIS_SOC_H_
+#define _CAM_OIS_SOC_H_
+
+#include "cam_ois_dev.h"
+
+/* Parse the OIS device-tree node and fill @o_ctrl's soc/cci fields.
+ * Returns 0 on success or a negative errno.
+ */
+int cam_ois_driver_soc_init(struct cam_ois_ctrl_t *o_ctrl);
+
+#endif/* _CAM_OIS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/Makefile
new file mode 100644
index 000000000000..b747d9a300b4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_res_mgr.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
new file mode 100644
index 000000000000..d03faef4d3a6
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
@@ -0,0 +1,738 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include "cam_debug_util.h"
+#include "cam_res_mgr_api.h"
+#include "cam_res_mgr_private.h"
+
+static struct cam_res_mgr *cam_res;
+
+/* Free every tracked resource: all gpio entries (and their per-device
+ * sub-lists), all flash/led-trigger entries, and reset the shared
+ * clock reference count. Each list is emptied under its own lock.
+ */
+static void cam_res_mgr_free_res(void)
+{
+	struct cam_dev_res *dev_res, *dev_temp;
+	struct cam_gpio_res *gpio_res, *gpio_temp;
+	struct cam_flash_res *flash_res, *flash_temp;
+
+	if (!cam_res)
+		return;
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	list_for_each_entry_safe(gpio_res, gpio_temp,
+		&cam_res->gpio_res_list, list) {
+		list_for_each_entry_safe(dev_res, dev_temp,
+			&gpio_res->dev_list, list) {
+			list_del_init(&dev_res->list);
+			kfree(dev_res);
+		}
+		list_del_init(&gpio_res->list);
+		kfree(gpio_res);
+	}
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry_safe(flash_res, flash_temp,
+		&cam_res->flash_res_list, list) {
+		list_del_init(&flash_res->list);
+		kfree(flash_res);
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+
+	mutex_lock(&cam_res->clk_res_lock);
+	cam_res->shared_clk_ref_count = 0;
+	mutex_unlock(&cam_res->clk_res_lock);
+}
+
+/**
+ * cam_res_mgr_led_trigger_register - register (or reuse) a led trigger
+ * @name: led trigger name
+ * @tp:   out parameter; receives the trigger handle (NULL on alloc failure)
+ *
+ * If a trigger with @name is already tracked in flash_res_list the
+ * cached handle is returned; otherwise a new trigger is registered and
+ * added to the list. Falls back to plain registration when this
+ * driver has not been probed.
+ */
+void cam_res_mgr_led_trigger_register(const char *name, struct led_trigger **tp)
+{
+	bool found = false;
+	struct cam_flash_res *flash_res;
+
+	if (!cam_res) {
+		/*
+		 * If this driver not probed, then just register the
+		 * led trigger.
+		 */
+		led_trigger_register_simple(name, tp);
+		return;
+	}
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+		if (!strcmp(flash_res->name, name)) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+
+	if (found) {
+		/* NOTE(review): flash_res is dereferenced after the lock is
+		 * dropped; safe only if entries are never removed
+		 * concurrently — confirm against unregister paths.
+		 */
+		*tp = flash_res->trigger;
+	} else {
+		flash_res = kzalloc(sizeof(struct cam_flash_res), GFP_KERNEL);
+		if (!flash_res) {
+			CAM_ERR(CAM_RES,
+				"Failed to malloc memory for flash_res:%s",
+				name);
+			*tp = NULL;
+			return;
+		}
+
+		led_trigger_register_simple(name, tp);
+		INIT_LIST_HEAD(&flash_res->list);
+		flash_res->trigger = *tp;
+		flash_res->name = name;
+
+		mutex_lock(&cam_res->flash_res_lock);
+		list_add_tail(&flash_res->list, &cam_res->flash_res_list);
+		mutex_unlock(&cam_res->flash_res_lock);
+	}
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_register);
+
+/**
+ * cam_res_mgr_led_trigger_unregister - unregister a tracked led trigger
+ * @tp: trigger handle previously returned by the register call
+ *
+ * Removes and frees the matching flash_res entry (lookup, unregister
+ * and free all happen under flash_res_lock). Falls back to plain
+ * unregistration when this driver has not been probed.
+ */
+void cam_res_mgr_led_trigger_unregister(struct led_trigger *tp)
+{
+	bool found = false;
+	struct cam_flash_res *flash_res;
+
+	if (!cam_res) {
+		/*
+		 * If this driver not probed, then just unregister the
+		 * led trigger.
+		 */
+		led_trigger_unregister_simple(tp);
+		return;
+	}
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+		if (flash_res->trigger == tp) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found) {
+		led_trigger_unregister_simple(tp);
+		list_del_init(&flash_res->list);
+		kfree(flash_res);
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_unregister);
+
+/**
+ * cam_res_mgr_led_trigger_event - fire a brightness event on a trigger
+ * @trig: trigger handle
+ * @brightness: brightness to set
+ *
+ * Only fires the event if @trig is tracked in flash_res_list (or if
+ * this driver has not been probed, in which case it passes through).
+ */
+void cam_res_mgr_led_trigger_event(struct led_trigger *trig,
+	enum led_brightness brightness)
+{
+	bool found = false;
+	struct cam_flash_res *flash_res;
+
+	if (!cam_res) {
+		/*
+		 * If this driver not probed, then just trigger
+		 * the led event.
+		 */
+		led_trigger_event(trig, brightness);
+		return;
+	}
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+		if (flash_res->trigger == trig) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+
+	/* the event itself is fired outside the lock */
+	if (found)
+		led_trigger_event(trig, brightness);
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_event);
+
+/**
+ * cam_res_mgr_shared_pinctrl_init - acquire the shared pinctrl handles
+ *
+ * Gets the pinctrl and looks up its default/sleep states. Any failure
+ * permanently disables shared-gpio support. A no-op (returning 0)
+ * when shared gpio is unsupported or the pinctrl is already held.
+ *
+ * Return: 0 on success or when unsupported, -EINVAL on lookup failure.
+ */
+int cam_res_mgr_shared_pinctrl_init(void)
+{
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	/*
+	 * We allow the cam_res is NULL or shared_gpio_enabled
+	 * is false, it means this driver no probed or doesn't
+	 * have shared gpio in this device.
+	 */
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Not support shared gpio.");
+		return 0;
+	}
+
+	/* NOTE(review): pstatus is read here without gpio_res_lock,
+	 * unlike the update below — confirm callers serialize init.
+	 */
+	if (cam_res->pstatus != PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl already been got.");
+		return 0;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	pinctrl_info->pinctrl =
+		devm_pinctrl_get(cam_res->dev);
+	if (IS_ERR_OR_NULL(pinctrl_info->pinctrl)) {
+		CAM_ERR(CAM_RES, "Pinctrl not available");
+		cam_res->shared_gpio_enabled = false;
+		return -EINVAL;
+	}
+
+	pinctrl_info->gpio_state_active =
+		pinctrl_lookup_state(pinctrl_info->pinctrl,
+			CAM_RES_MGR_DEFAULT);
+	if (IS_ERR_OR_NULL(pinctrl_info->gpio_state_active)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the active state pinctrl handle");
+		cam_res->shared_gpio_enabled = false;
+		return -EINVAL;
+	}
+
+	pinctrl_info->gpio_state_suspend =
+		pinctrl_lookup_state(pinctrl_info->pinctrl,
+			CAM_RES_MGR_SLEEP);
+	if (IS_ERR_OR_NULL(pinctrl_info->gpio_state_suspend)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the active state pinctrl handle");
+		cam_res->shared_gpio_enabled = false;
+		return -EINVAL;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	cam_res->pstatus = PINCTRL_STATUS_GOT;
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_init);
+
+/**
+ * cam_res_mgr_shared_pinctrl_check_hold - should the pinctrl stay held?
+ *
+ * The pinctrl must be held while any shared gpio is still used by two
+ * or more devices, or while the shared clock has more than one user.
+ * Caller must hold gpio_res_lock.
+ *
+ * Return: true if the pinctrl must not be put/suspended yet.
+ */
+static bool cam_res_mgr_shared_pinctrl_check_hold(void)
+{
+	int index = 0;
+	int dev_num = 0;
+	bool hold = false;
+	struct list_head *list;
+	struct cam_gpio_res *gpio_res;
+	struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+	for (; index < dt->num_shared_gpio; index++) {
+		list_for_each_entry(gpio_res,
+			&cam_res->gpio_res_list, list) {
+
+			if (gpio_res->gpio ==
+				dt->shared_gpio[index]) {
+				/* Bug fix: count users of this gpio only.
+				 * Previously dev_num accumulated across
+				 * entries, falsely forcing a hold.
+				 */
+				dev_num = 0;
+				list_for_each(list, &gpio_res->dev_list)
+					dev_num++;
+
+				if (dev_num >= 2) {
+					hold = true;
+					break;
+				}
+			}
+		}
+	}
+
+	if (cam_res->shared_clk_ref_count > 1)
+		hold = true;
+
+	return hold;
+}
+
+/**
+ * cam_res_mgr_shared_pinctrl_put - release the shared pinctrl
+ *
+ * Puts the pinctrl unless it is still needed (another device holds a
+ * shared gpio, or the shared clock has users). No-op when shared gpio
+ * is unsupported or the pinctrl was already put.
+ */
+void cam_res_mgr_shared_pinctrl_put(void)
+{
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Not support shared gpio.");
+		return;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl already been put");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return;
+	}
+
+	if (cam_res_mgr_shared_pinctrl_check_hold()) {
+		CAM_INFO(CAM_RES, "Need hold put this pinctrl");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	devm_pinctrl_put(pinctrl_info->pinctrl);
+
+	cam_res->pstatus = PINCTRL_STATUS_PUT;
+	mutex_unlock(&cam_res->gpio_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_put);
+
+/**
+ * cam_res_mgr_shared_pinctrl_select_state - switch pinctrl state
+ * @active: true for the default/active state, false for sleep
+ *
+ * The suspend transition is skipped while the pinctrl must stay held
+ * (see cam_res_mgr_shared_pinctrl_check_hold). No-op when shared gpio
+ * is unsupported or the pinctrl was already put.
+ *
+ * Return: 0 on success or no-op, negative errno from pinctrl core.
+ */
+int cam_res_mgr_shared_pinctrl_select_state(bool active)
+{
+	int rc = 0;
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Not support shared gpio.");
+		return 0;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl alerady been put.!");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return 0;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	if (active && (cam_res->pstatus != PINCTRL_STATUS_ACTIVE)) {
+		rc = pinctrl_select_state(pinctrl_info->pinctrl,
+			pinctrl_info->gpio_state_active);
+		cam_res->pstatus = PINCTRL_STATUS_ACTIVE;
+	} else if (!active &&
+		!cam_res_mgr_shared_pinctrl_check_hold()) {
+		rc = pinctrl_select_state(pinctrl_info->pinctrl,
+			pinctrl_info->gpio_state_suspend);
+		cam_res->pstatus = PINCTRL_STATUS_SUSPEND;
+	}
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_select_state);
+
+/**
+ * cam_res_mgr_shared_pinctrl_post_init - release pinctrl if unused
+ *
+ * If no shared gpio resource and no shared clock user exists after
+ * init, the pinctrl is moved to its sleep state and put. No-op when
+ * shared gpio is unsupported or the pinctrl was already put.
+ *
+ * Return: 0 on success or no-op, negative errno from pinctrl core.
+ */
+int cam_res_mgr_shared_pinctrl_post_init(void)
+{
+	int ret = 0;
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Not support shared gpio.");
+		return ret;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl alerady been put.!");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return ret;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	/*
+	 * If no gpio resource in gpio_res_list, and
+	 * no shared clk now, it means this device
+	 * don't have shared gpio.
+	 */
+	if (list_empty(&cam_res->gpio_res_list) &&
+		cam_res->shared_clk_ref_count < 1) {
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+			pinctrl_info->gpio_state_suspend);
+		devm_pinctrl_put(pinctrl_info->pinctrl);
+		cam_res->pstatus = PINCTRL_STATUS_PUT;
+	}
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_post_init);
+
+/* Record @dev as one more user of the shared gpio tracked by @gpio_res.
+ * Caller is responsible for any required locking.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int cam_res_mgr_add_device(struct device *dev,
+	struct cam_gpio_res *gpio_res)
+{
+	struct cam_dev_res *node;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL)
+		return -ENOMEM;
+
+	node->dev = dev;
+	INIT_LIST_HEAD(&node->list);
+	list_add_tail(&node->list, &gpio_res->dev_list);
+
+	return 0;
+}
+
+/* A gpio counts as "shared" when it appears in the device-tree
+ * shared-gpio table parsed at probe time.
+ */
+static bool cam_res_mgr_gpio_is_shared(uint gpio)
+{
+	int i;
+	struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+	for (i = 0; i < dt->num_shared_gpio; i++) {
+		if (dt->shared_gpio[i] == gpio)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * cam_res_mgr_gpio_request - request a gpio, with shared-gpio tracking
+ * @dev: requesting device
+ * @gpio: gpio number
+ * @flags: gpio_request_one() flags
+ * @label: gpio label
+ *
+ * Requests the gpio unless it is already tracked as shared. A shared
+ * gpio gains a per-device entry in its dev_list instead of a second
+ * hardware request.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_res_mgr_gpio_request(struct device *dev, uint gpio,
+	unsigned long flags, const char *label)
+{
+	int rc = 0;
+	bool found = false;
+	struct cam_gpio_res *gpio_res = NULL;
+
+	if (cam_res && cam_res->shared_gpio_enabled) {
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+			if (gpio == gpio_res->gpio) {
+				found = true;
+				break;
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	/*
+	 * found equal to false has two situation:
+	 * 1. shared gpio not enabled
+	 * 2. shared gpio enabled, but not find this gpio
+	 *    from the gpio_res_list
+	 * These two situation both need request gpio.
+	 */
+	if (!found) {
+		rc = gpio_request_one(gpio, flags, label);
+		if (rc) {
+			CAM_ERR(CAM_RES, "gpio %d:%s request fails",
+				gpio, label);
+			return rc;
+		}
+	}
+
+	/*
+	 * If the gpio is in the shared list, and not find
+	 * from gpio_res_list, then insert a cam_gpio_res
+	 * to gpio_res_list.
+	 */
+	if (!found && cam_res
+		&& cam_res->shared_gpio_enabled &&
+		cam_res_mgr_gpio_is_shared(gpio)) {
+
+		gpio_res = kzalloc(sizeof(struct cam_gpio_res), GFP_KERNEL);
+		if (!gpio_res)
+			return -ENOMEM;
+
+		gpio_res->gpio = gpio;
+		gpio_res->power_on_count = 0;
+		INIT_LIST_HEAD(&gpio_res->list);
+		INIT_LIST_HEAD(&gpio_res->dev_list);
+
+		rc = cam_res_mgr_add_device(dev, gpio_res);
+		if (rc) {
+			kfree(gpio_res);
+			return rc;
+		}
+
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_add_tail(&gpio_res->list, &cam_res->gpio_res_list);
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	/* already-shared gpio: just add @dev to its user list (once) */
+	if (found && cam_res
+		&& cam_res->shared_gpio_enabled) {
+		struct cam_dev_res *dev_res = NULL;
+
+		found = 0;
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(dev_res, &gpio_res->dev_list, list) {
+			if (dev_res->dev == dev) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (!found)
+			rc = cam_res_mgr_add_device(dev, gpio_res);
+
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_request);
+
+/**
+ * cam_res_mgr_gpio_free - release one gpio on behalf of @dev
+ * @dev: device releasing the gpio
+ * @gpio: gpio number
+ *
+ * For a tracked shared gpio the hardware gpio is only freed when @dev
+ * is its last remaining user; otherwise just @dev's entry is dropped.
+ * Untracked gpios are freed directly.
+ */
+static void cam_res_mgr_gpio_free(struct device *dev, uint gpio)
+{
+	bool found = false;
+	bool need_free = true;
+	int dev_num =  0;
+	struct cam_gpio_res *gpio_res = NULL;
+
+	if (cam_res && cam_res->shared_gpio_enabled) {
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+			if (gpio == gpio_res->gpio) {
+				found = true;
+				break;
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	if (found && cam_res
+		&& cam_res->shared_gpio_enabled) {
+		struct list_head *list;
+		struct cam_dev_res *dev_res = NULL;
+
+		mutex_lock(&cam_res->gpio_res_lock);
+		/* Count the dev number in the dev_list */
+		list_for_each(list, &gpio_res->dev_list)
+			dev_num++;
+
+		/*
+		 * Need free the gpio if only has last 1 device
+		 * in the dev_list, otherwise, not free this
+		 * gpio.
+		 */
+		if (dev_num == 1) {
+			dev_res = list_first_entry(&gpio_res->dev_list,
+				struct cam_dev_res, list);
+			list_del_init(&dev_res->list);
+			kfree(dev_res);
+
+			list_del_init(&gpio_res->list);
+			kfree(gpio_res);
+		} else {
+			list_for_each_entry(dev_res,
+				&gpio_res->dev_list, list) {
+				if (dev_res->dev == dev) {
+					list_del_init(&dev_res->list);
+					kfree(dev_res);
+					need_free = false;
+					break;
+				}
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	if (need_free)
+		gpio_free(gpio);
+}
+
+/* Free an array of gpios via cam_res_mgr_gpio_free().
+ * ("arry" typo is kept: the name is part of the exported API.)
+ */
+void cam_res_mgr_gpio_free_arry(struct device *dev,
+		const struct gpio *array, size_t num)
+{
+	while (num--)
+		cam_res_mgr_gpio_free(dev, (array[num]).gpio);
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_free_arry);
+
+/**
+ * cam_res_mgr_gpio_set_value - set a gpio with shared ref-counting
+ * @gpio: gpio number
+ * @value: level to set (non-zero = high)
+ *
+ * For a tracked shared gpio only the first "on" and the last "off"
+ * actually toggle the pin; intermediate calls just adjust the count.
+ * Untracked gpios are set directly.
+ *
+ * Return: always 0.
+ */
+int cam_res_mgr_gpio_set_value(unsigned int gpio, int value)
+{
+	int rc = 0;
+	bool found = false;
+	struct cam_gpio_res *gpio_res = NULL;
+
+	if (cam_res && cam_res->shared_gpio_enabled) {
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+			if (gpio == gpio_res->gpio) {
+				found = true;
+				break;
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	/*
+	 * Set the value directly if can't find the gpio from
+	 * gpio_res_list, otherwise, need add ref count support
+	 **/
+	if (!found) {
+		gpio_set_value_cansleep(gpio, value);
+	} else {
+		/* NOTE(review): power_on_count is updated outside the
+		 * lock here — confirm callers serialize power sequences.
+		 */
+		if (value) {
+			gpio_res->power_on_count++;
+			if (gpio_res->power_on_count < 2) {
+				gpio_set_value_cansleep(gpio, value);
+				CAM_DBG(CAM_RES,
+					"Shared GPIO(%d) : HIGH", gpio);
+			}
+		} else {
+			gpio_res->power_on_count--;
+			if (gpio_res->power_on_count < 1) {
+				gpio_set_value_cansleep(gpio, value);
+				CAM_DBG(CAM_RES,
+					"Shared GPIO(%d) : LOW", gpio);
+			}
+		}
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_set_value);
+
+/* Adjust the shared-clock user count: @value true adds a user, false
+ * removes one. All updates happen under clk_res_lock. No-op before
+ * probe.
+ */
+void cam_res_mgr_shared_clk_config(bool value)
+{
+	if (cam_res == NULL)
+		return;
+
+	mutex_lock(&cam_res->clk_res_lock);
+	cam_res->shared_clk_ref_count += value ? 1 : -1;
+	mutex_unlock(&cam_res->clk_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_clk_config);
+
+/**
+ * cam_res_mgr_parse_dt - parse the shared-gpio configuration
+ * @dev: the cam-res-mgr platform device
+ *
+ * Reads the "shared-gpios" property and validates that default/sleep
+ * pinctrl states exist. The pinctrl handle acquired here is only for
+ * validation and is put again before returning.
+ *
+ * Return: 0 when shared gpios are configured; -EINVAL otherwise
+ * (which the probe treats as "shared gpio disabled", not fatal).
+ */
+static int cam_res_mgr_parse_dt(struct device *dev)
+{
+	int rc = 0;
+	struct device_node *of_node = NULL;
+	struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+	of_node = dev->of_node;
+
+	dt->num_shared_gpio = of_property_count_u32_elems(of_node,
+		"shared-gpios");
+
+	if (dt->num_shared_gpio > MAX_SHARED_GPIO_SIZE ||
+		dt->num_shared_gpio <= 0) {
+		/*
+		 * Not really an error, it means dtsi not configure
+		 * the shared gpio.
+		 */
+		CAM_DBG(CAM_RES, "Invalid GPIO number %d. No shared gpio.",
+			dt->num_shared_gpio);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "shared-gpios",
+		dt->shared_gpio, dt->num_shared_gpio);
+	if (rc) {
+		CAM_ERR(CAM_RES, "Get shared gpio array failed.");
+		return -EINVAL;
+	}
+
+	dt->pinctrl_info.pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(dt->pinctrl_info.pinctrl)) {
+		CAM_ERR(CAM_RES, "Pinctrl not available");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check the pinctrl state to make sure the gpio
+	 * shared enabled.
+	 */
+	dt->pinctrl_info.gpio_state_active =
+		pinctrl_lookup_state(dt->pinctrl_info.pinctrl,
+			CAM_RES_MGR_DEFAULT);
+	if (IS_ERR_OR_NULL(dt->pinctrl_info.gpio_state_active)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the active state pinctrl handle");
+		return -EINVAL;
+	}
+
+	dt->pinctrl_info.gpio_state_suspend =
+		pinctrl_lookup_state(dt->pinctrl_info.pinctrl,
+			CAM_RES_MGR_SLEEP);
+	if (IS_ERR_OR_NULL(dt->pinctrl_info.gpio_state_suspend)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the active state pinctrl handle");
+		return -EINVAL;
+	}
+
+	/* validation only: the handle is re-acquired in pinctrl_init */
+	devm_pinctrl_put(dt->pinctrl_info.pinctrl);
+
+	return rc;
+}
+
+/**
+ * cam_res_mgr_probe - set up the (singleton) camera resource manager
+ * @pdev: matched platform device
+ *
+ * Allocates the file-scope cam_res state, initializes its locks and
+ * lists, and enables shared-gpio support when the device tree
+ * provides a valid "shared-gpios" configuration.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int cam_res_mgr_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	cam_res = kzalloc(sizeof(*cam_res), GFP_KERNEL);
+	if (!cam_res)
+		return -ENOMEM;
+
+	cam_res->dev = &pdev->dev;
+	mutex_init(&cam_res->flash_res_lock);
+	mutex_init(&cam_res->gpio_res_lock);
+	mutex_init(&cam_res->clk_res_lock);
+
+	/* a parse failure only disables shared gpio, it is not fatal */
+	rc = cam_res_mgr_parse_dt(&pdev->dev);
+	if (rc) {
+		CAM_INFO(CAM_RES, "Disable shared gpio support.");
+		cam_res->shared_gpio_enabled = false;
+	} else {
+		CAM_INFO(CAM_RES, "Enable shared gpio support.");
+		cam_res->shared_gpio_enabled = true;
+	}
+
+	cam_res->shared_clk_ref_count = 0;
+	cam_res->pstatus = PINCTRL_STATUS_PUT;
+
+	INIT_LIST_HEAD(&cam_res->gpio_res_list);
+	INIT_LIST_HEAD(&cam_res->flash_res_list);
+
+	return 0;
+}
+
+/* Tear down the resource manager: drop every tracked resource and
+ * release the singleton state. Safe to call when probe never ran.
+ */
+static int cam_res_mgr_remove(struct platform_device *pdev)
+{
+	if (!cam_res)
+		return 0;
+
+	cam_res_mgr_free_res();
+	kfree(cam_res);
+	cam_res = NULL;
+
+	return 0;
+}
+
+/* Device-tree match table for the resource manager */
+static const struct of_device_id cam_res_mgr_dt_match[] = {
+	{.compatible = "qcom,cam-res-mgr"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_res_mgr_dt_match);
+
+static struct platform_driver cam_res_mgr_driver = {
+	.probe = cam_res_mgr_probe,
+	.remove = cam_res_mgr_remove,
+	.driver = {
+		.name = "cam_res_mgr",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_res_mgr_dt_match,
+		/* singleton state (cam_res): forbid manual rebinding */
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_res_mgr_init(void)
+{
+	return platform_driver_register(&cam_res_mgr_driver);
+}
+
+static void __exit cam_res_mgr_exit(void)
+{
+	platform_driver_unregister(&cam_res_mgr_driver);
+}
+
+module_init(cam_res_mgr_init);
+module_exit(cam_res_mgr_exit);
+MODULE_DESCRIPTION("Camera resource manager driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
new file mode 100644
index 000000000000..379fd97fd6f5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
@@ -0,0 +1,148 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_RES_MGR_API_H__
+#define __CAM_RES_MGR_API_H__
+
+#include <linux/leds.h>
+
+/**
+ * @brief: Register the led trigger
+ *
+ * The newly registered led trigger is assigned to flash_res_list.
+ *
+ * @name : Pointer to int led trigger name
+ * @tp : Save the returned led trigger
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_register(const char *name,
+ struct led_trigger **tp);
+
+/**
+ * @brief: Unregister the led trigger
+ *
+ * Free the flash_res if this led trigger isn't used by other device .
+ *
+ * @tp : Pointer to the led trigger
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_unregister(struct led_trigger *tp);
+
+/**
+ * @brief: Trigger the event to led core
+ *
+ * @trig : Pointer to the led trigger
+ * @brightness : The brightness need to fire
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_event(struct led_trigger *trig,
+ enum led_brightness brightness);
+
+/**
+ * @brief: Get the corresponding pinctrl of dev
+ *
+ * Init the shared pinctrl if shared pinctrl enabled.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_init(void);
+
+/**
+ * @brief: Put the pinctrl
+ *
+ * Put the shared pinctrl.
+ *
+ * @return None
+ */
+void cam_res_mgr_shared_pinctrl_put(void);
+
+/**
+ * @brief: Select the corresponding state
+ *
+ * Active state can be selected directly, but need hold to suspend the
+ * pinctrl if the gpios in this pinctrl also held by other pinctrl.
+ *
+ * @active : The flag to indicate whether active or suspend
+ * the shared pinctrl.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_select_state(bool active);
+
+/**
+ * @brief: Post init shared pinctrl
+ *
+ * Post init to check if the device really has shared gpio,
+ * suspend and put the pinctrl if not use shared gpio.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_post_init(void);
+
+/**
+ * @brief: Request a gpio
+ *
+ * Will alloc a gpio_res for the new gpio, other find the corresponding
+ * gpio_res.
+ *
+ * @dev : Pointer to the device
+ * @gpio : The GPIO number
+ * @flags : GPIO configuration as specified by GPIOF_*
+ * @label : A literal description string of this GPIO
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_gpio_request(struct device *dev, unsigned int gpio,
+ unsigned long flags, const char *label);
+
+/**
+ * @brief: Free an array of GPIOs
+ *
+ * Free the GPIOs and release the corresponding gpio_res entries.
+ *
+ * @dev   : Pointer to the device
+ * @array : Array of GPIO descriptors to free
+ * @num   : The number of entries in the array
+ *
+ * @return None
+ */
+void cam_res_mgr_gpio_free_arry(struct device *dev,
+ const struct gpio *array, size_t num);
+
+/**
+ * @brief: Set GPIO power level
+ *
+ * Add ref count support for shared GPIOs.
+ *
+ * @gpio : The GPIO number
+ * @value : The power level need to setup
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ * -EINVAL will be returned if the gpio can't be found in gpio_res_list.
+ */
+int cam_res_mgr_gpio_set_value(unsigned int gpio, int value);
+
+/**
+ * @brief: Config the shared clk ref count
+ *
+ * Increase or decrease the shared clk reference count.
+ *
+ * @value : get (true) or put (false) the shared clk.
+ *
+ * @return None
+ */
+void cam_res_mgr_shared_clk_config(bool value);
+
+#endif /* __CAM_RES_MGR_API_H__ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
new file mode 100644
index 000000000000..6534f4b97ddf
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
@@ -0,0 +1,117 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_RES_MGR_PRIVATE_H__
+#define __CAM_RES_MGR_PRIVATE_H__
+
+#include <linux/list.h>
+#include <linux/leds.h>
+#include "cam_soc_util.h"
+
+#define MAX_SHARED_GPIO_SIZE 16
+
+/* pinctrl states name */
+#define CAM_RES_MGR_SLEEP "cam_res_mgr_suspend"
+#define CAM_RES_MGR_DEFAULT "cam_res_mgr_default"
+
+/**
+ * enum pinctrl_status - Enum for pinctrl status
+ *
+ * Lifecycle states of the shared pinctrl handle.
+ */
+enum pinctrl_status {
+ PINCTRL_STATUS_GOT = 0,
+ PINCTRL_STATUS_ACTIVE,
+ PINCTRL_STATUS_SUSPEND,
+ PINCTRL_STATUS_PUT,
+};
+
+/**
+ * struct cam_dev_res
+ *
+ * @list : List member used to append this node to a dev list
+ * @dev : Device pointer associated with device
+ */
+struct cam_dev_res {
+ struct list_head list;
+ struct device *dev;
+};
+
+/**
+ * struct cam_gpio_res
+ *
+ * @list : List member used to append this node to a gpio list
+ * @dev_list : List the device which request this gpio
+ * @gpio : Gpio value
+ * @power_on_count : Record the power on times of this gpio
+ */
+struct cam_gpio_res {
+ struct list_head list;
+ struct list_head dev_list;
+ unsigned int gpio;
+ int power_on_count;
+};
+
+/**
+ * struct cam_flash_res
+ *
+ * @list : List member used to append this node to a linked list
+ * @name : Pointer to the flash trigger's name.
+ * @trigger : Pointer to the flash trigger
+ */
+struct cam_flash_res {
+ struct list_head list;
+ const char *name;
+ struct led_trigger *trigger;
+};
+
+/**
+ * struct cam_res_mgr_dt
+ *
+ * @shared_gpio : Shared gpios list in the device tree
+ * @num_shared_gpio : The number of shared gpio
+ * @pinctrl_info : Pinctrl information
+ */
+struct cam_res_mgr_dt {
+ uint shared_gpio[MAX_SHARED_GPIO_SIZE];
+ int num_shared_gpio;
+ struct cam_soc_pinctrl_info pinctrl_info;
+};
+
+/**
+ * struct cam_res_mgr
+ *
+ * Global context of the camera resource manager (one per system).
+ *
+ * @dev : Pointer to the device
+ * @dt : Device tree resource
+ * @shared_gpio_enabled : The flag to indicate if support shared gpio
+ * @pstatus : Shared pinctrl status
+ * @shared_clk_ref_count : Reference count of shared clk users
+ * @gpio_res_list : List head of the gpio resource
+ * @flash_res_list : List head of the flash resource
+ * @gpio_res_lock : GPIO resource lock
+ * @flash_res_lock : Flash resource lock
+ * @clk_res_lock : Clk resource lock
+ */
+struct cam_res_mgr {
+ struct device *dev;
+ struct cam_res_mgr_dt dt;
+
+ bool shared_gpio_enabled;
+ enum pinctrl_status pstatus;
+
+ uint shared_clk_ref_count;
+
+ struct list_head gpio_res_list;
+ struct list_head flash_res_list;
+ struct mutex gpio_res_lock;
+ struct mutex flash_res_lock;
+ struct mutex clk_res_lock;
+};
+
+#endif /* __CAM_RES_MGR_PRIVATE_H__ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/Makefile
new file mode 100644
index 000000000000..46bddb5e3b38
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_dev.o cam_sensor_core.o cam_sensor_soc.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_core.c
new file mode 100644
index 000000000000..2117cc1f9587
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -0,0 +1,1254 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <cam_sensor_cmn_header.h>
+#include "cam_sensor_core.h"
+#include "cam_sensor_util.h"
+#include "cam_soc_util.h"
+#include "cam_trace.h"
+#include "cam_common_util.h"
+
+/* Forward the request id of a received packet to the CRM so the request
+ * manager can schedule it against this sensor's link.
+ */
+static void cam_sensor_update_req_mgr(
+ struct cam_sensor_ctrl_t *s_ctrl,
+ struct cam_packet *csl_packet)
+{
+ struct cam_req_mgr_add_request add_req = {
+ .link_hdl = s_ctrl->bridge_intf.link_hdl,
+ .dev_hdl = s_ctrl->bridge_intf.device_hdl,
+ .req_id = csl_packet->header.request_id,
+ .skip_before_applying = 0,
+ };
+
+ CAM_DBG(CAM_SENSOR, " Rxed Req Id: %lld",
+ csl_packet->header.request_id);
+
+ /* The callback may not be wired up yet (not linked). */
+ if (s_ctrl->bridge_intf.crm_cb &&
+ s_ctrl->bridge_intf.crm_cb->add_req)
+ s_ctrl->bridge_intf.crm_cb->add_req(&add_req);
+
+ CAM_DBG(CAM_SENSOR, " add req to req mgr: %lld",
+ add_req.req_id);
+}
+
+/* Drop any cached streamon/streamoff I2C settings. Marking
+ * is_settings_valid = -1 before deleting mirrors the original flow.
+ */
+static void cam_sensor_release_stream_rsc(
+ struct cam_sensor_ctrl_t *s_ctrl)
+{
+ struct i2c_settings_array *settings;
+
+ settings = &(s_ctrl->i2c_data.streamoff_settings);
+ if (settings->is_settings_valid == 1) {
+ settings->is_settings_valid = -1;
+ if (delete_request(settings) < 0)
+ CAM_ERR(CAM_SENSOR,
+ "failed while deleting Streamoff settings");
+ }
+
+ settings = &(s_ctrl->i2c_data.streamon_settings);
+ if (settings->is_settings_valid == 1) {
+ settings->is_settings_valid = -1;
+ if (delete_request(settings) < 0)
+ CAM_ERR(CAM_SENSOR,
+ "failed while deleting Streamon settings");
+ }
+}
+
+/* Free every valid per-frame I2C setting slot in the ring of
+ * MAX_PER_FRAME_ARRAY entries.
+ */
+static void cam_sensor_release_per_frame_resource(
+ struct cam_sensor_ctrl_t *s_ctrl)
+{
+ struct i2c_settings_array *slot;
+ int idx, ret;
+
+ if (s_ctrl->i2c_data.per_frame == NULL)
+ return;
+
+ for (idx = 0; idx < MAX_PER_FRAME_ARRAY; idx++) {
+ slot = &(s_ctrl->i2c_data.per_frame[idx]);
+ if (slot->is_settings_valid != 1)
+ continue;
+
+ slot->is_settings_valid = -1;
+ ret = delete_request(slot);
+ if (ret < 0)
+ CAM_ERR(CAM_SENSOR,
+ "delete request: %lld rc: %d",
+ slot->request_id, ret);
+ }
+}
+
+/* Parse an I2C config packet received from userspace via CAM_CONFIG_DEV.
+ *
+ * Copies the cam_config_dev_cmd from the user pointer, maps the packet
+ * buffer, validates the user-supplied packet offset, then routes the
+ * contained command buffers into the i2c_settings_array matching the
+ * packet opcode (init/config/streamon/streamoff/per-frame update/NOP).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
+ void *arg)
+{
+ int32_t rc = 0;
+ uintptr_t generic_ptr;
+ struct cam_control *ioctl_ctrl = NULL;
+ struct cam_packet *csl_packet = NULL;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ struct i2c_settings_array *i2c_reg_settings = NULL;
+ size_t len_of_buff = 0;
+ uint32_t *offset = NULL;
+ struct cam_config_dev_cmd config;
+ struct i2c_data_settings *i2c_data = NULL;
+
+ ioctl_ctrl = (struct cam_control *)arg;
+
+ if (ioctl_ctrl->handle_type != CAM_HANDLE_USER_POINTER) {
+ CAM_ERR(CAM_SENSOR, "Invalid Handle Type");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&config,
+ u64_to_user_ptr(ioctl_ctrl->handle),
+ sizeof(config)))
+ return -EFAULT;
+
+ rc = cam_mem_get_cpu_buf(
+ config.packet_handle,
+ &generic_ptr,
+ &len_of_buff);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Failed in getting the buffer: %d", rc);
+ return rc;
+ }
+
+ /* Fix: validate the userspace-controlled offset BEFORE deriving a
+  * pointer from it. The previous code computed csl_packet first
+  * (creating an out-of-bounds pointer on bad input), and its
+  * "offset > len" check still admitted offsets that leave less
+  * than one struct cam_packet of room in the buffer.
+  */
+ if ((sizeof(struct cam_packet) > len_of_buff) ||
+ ((size_t)config.offset >=
+ (len_of_buff - sizeof(struct cam_packet)))) {
+ CAM_ERR(CAM_SENSOR,
+ "offset is out of bounds: off: %lld len: %zu",
+ config.offset, len_of_buff);
+ return -EINVAL;
+ }
+
+ csl_packet = (struct cam_packet *)(generic_ptr +
+ (uint32_t)config.offset);
+
+ /* Reject requests already covered by an earlier flush. */
+ if ((csl_packet->header.op_code & 0xFFFFFF) !=
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG &&
+ csl_packet->header.request_id <= s_ctrl->last_flush_req
+ && s_ctrl->last_flush_req != 0) {
+ CAM_DBG(CAM_SENSOR,
+ "reject request %lld, last request to flush %lld",
+ csl_packet->header.request_id, s_ctrl->last_flush_req);
+ return -EINVAL;
+ }
+
+ if (csl_packet->header.request_id > s_ctrl->last_flush_req)
+ s_ctrl->last_flush_req = 0;
+
+ i2c_data = &(s_ctrl->i2c_data);
+ CAM_DBG(CAM_SENSOR, "Header OpCode: %d", csl_packet->header.op_code);
+ switch (csl_packet->header.op_code & 0xFFFFFF) {
+ case CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG: {
+ i2c_reg_settings = &i2c_data->init_settings;
+ i2c_reg_settings->request_id = 0;
+ i2c_reg_settings->is_settings_valid = 1;
+ break;
+ }
+ case CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG: {
+ i2c_reg_settings = &i2c_data->config_settings;
+ i2c_reg_settings->request_id = 0;
+ i2c_reg_settings->is_settings_valid = 1;
+ break;
+ }
+ case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON: {
+ /* Streamon settings are parsed only once per session. */
+ if (s_ctrl->streamon_count > 0)
+ return 0;
+
+ s_ctrl->streamon_count = s_ctrl->streamon_count + 1;
+ i2c_reg_settings = &i2c_data->streamon_settings;
+ i2c_reg_settings->request_id = 0;
+ i2c_reg_settings->is_settings_valid = 1;
+ break;
+ }
+ case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF: {
+ if (s_ctrl->streamoff_count > 0)
+ return 0;
+
+ s_ctrl->streamoff_count = s_ctrl->streamoff_count + 1;
+ i2c_reg_settings = &i2c_data->streamoff_settings;
+ i2c_reg_settings->request_id = 0;
+ i2c_reg_settings->is_settings_valid = 1;
+ break;
+ }
+
+ case CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE: {
+ if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+ (s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
+ CAM_WARN(CAM_SENSOR,
+ "Rxed Update packets without linking");
+ return 0;
+ }
+
+ /* Per-frame slots are indexed by request id modulo ring size. */
+ i2c_reg_settings =
+ &i2c_data->per_frame[csl_packet->header.request_id %
+ MAX_PER_FRAME_ARRAY];
+ CAM_DBG(CAM_SENSOR, "Received Packet: %lld req: %lld",
+ csl_packet->header.request_id % MAX_PER_FRAME_ARRAY,
+ csl_packet->header.request_id);
+ if (i2c_reg_settings->is_settings_valid == 1) {
+ CAM_ERR(CAM_SENSOR,
+ "Already some pkt in offset req : %lld",
+ csl_packet->header.request_id);
+ /*
+ * Update req mgr even in case of failure.
+ * This will help not to wait indefinitely
+ * and freeze. If this log is triggered then
+ * fix it.
+ */
+ cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+ return 0;
+ }
+ break;
+ }
+ case CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP: {
+ if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+ (s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
+ CAM_WARN(CAM_SENSOR,
+ "Rxed NOP packets without linking");
+ return 0;
+ }
+
+ cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+ return 0;
+ }
+ default:
+ CAM_ERR(CAM_SENSOR, "Invalid Packet Header");
+ return -EINVAL;
+ }
+
+ offset = (uint32_t *)&csl_packet->payload;
+ offset += csl_packet->cmd_buf_offset / 4;
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+ rc = cam_sensor_i2c_command_parser(&s_ctrl->io_master_info,
+ i2c_reg_settings, cmd_desc, 1);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc);
+ return rc;
+ }
+
+ if ((csl_packet->header.op_code & 0xFFFFFF) ==
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE) {
+ i2c_reg_settings->request_id =
+ csl_packet->header.request_id;
+ cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+ }
+
+ return rc;
+}
+
+/* Dispatch one parsed i2c_settings_list to the matching camera_io
+ * write/poll helper based on its op_code. Unknown op codes are a no-op
+ * (returns 0), matching the original behavior.
+ */
+static int32_t cam_sensor_i2c_modes_util(
+ struct camera_io_master *io_master_info,
+ struct i2c_settings_list *i2c_list)
+{
+ int32_t rc = 0;
+ uint32_t idx, count;
+
+ switch (i2c_list->op_code) {
+ case CAM_SENSOR_I2C_WRITE_RANDOM:
+ rc = camera_io_dev_write(io_master_info,
+ &(i2c_list->i2c_settings));
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Failed to random write I2C settings: %d",
+ rc);
+ return rc;
+ }
+ break;
+ case CAM_SENSOR_I2C_WRITE_SEQ:
+ rc = camera_io_dev_write_continuous(
+ io_master_info,
+ &(i2c_list->i2c_settings),
+ 0);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Failed to seq write I2C settings: %d",
+ rc);
+ return rc;
+ }
+ break;
+ case CAM_SENSOR_I2C_WRITE_BURST:
+ rc = camera_io_dev_write_continuous(
+ io_master_info,
+ &(i2c_list->i2c_settings),
+ 1);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Failed to burst write I2C settings: %d",
+ rc);
+ return rc;
+ }
+ break;
+ case CAM_SENSOR_I2C_POLL:
+ count = i2c_list->i2c_settings.size;
+ for (idx = 0; idx < count; idx++) {
+ rc = camera_io_dev_poll(
+ io_master_info,
+ i2c_list->i2c_settings.reg_setting[idx].reg_addr,
+ i2c_list->i2c_settings.reg_setting[idx].reg_data,
+ i2c_list->i2c_settings.reg_setting[idx].data_mask,
+ i2c_list->i2c_settings.addr_type,
+ i2c_list->i2c_settings.data_type,
+ i2c_list->i2c_settings.reg_setting[idx].delay);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "i2c poll apply setting Fail: %d", rc);
+ return rc;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/* Program the CCI client (master, slave id, retries, freq mode) from a
+ * probe-time i2c_info command buffer, and record the slave address in
+ * the sensor's slave_info.
+ *
+ * Returns 0 on success, -EINVAL if the CCI client is missing.
+ */
+int32_t cam_sensor_update_i2c_info(struct cam_cmd_i2c_info *i2c_info,
+ struct cam_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ struct cam_sensor_cci_client *cci_client = NULL;
+
+ /* CCI-specific setup; other master types only record slave_addr. */
+ if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
+ cci_client = s_ctrl->io_master_info.cci_client;
+ if (!cci_client) {
+ CAM_ERR(CAM_SENSOR, "failed: cci_client %pK",
+ cci_client);
+ return -EINVAL;
+ }
+ cci_client->cci_i2c_master = s_ctrl->cci_i2c_master;
+ /* >> 1: presumably converts an 8-bit address to the 7-bit
+  * sid the CCI expects — TODO confirm against CCI docs. */
+ cci_client->sid = i2c_info->slave_addr >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+ CAM_DBG(CAM_SENSOR, " Master: %d sid: %d freq_mode: %d",
+ cci_client->cci_i2c_master, i2c_info->slave_addr,
+ i2c_info->i2c_freq_mode);
+ }
+
+ s_ctrl->sensordata->slave_info.sensor_slave_addr =
+ i2c_info->slave_addr;
+ return rc;
+}
+
+/* Copy probe-command fields (id register address, expected id, id mask,
+ * addr/data types) into the sensor's slave_info, and cache the pipeline
+ * delay userspace smuggles through the reserved field.
+ *
+ * Always returns 0.
+ */
+int32_t cam_sensor_update_slave_info(struct cam_cmd_probe *probe_info,
+ struct cam_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+
+ s_ctrl->sensordata->slave_info.sensor_id_reg_addr =
+ probe_info->reg_addr;
+ s_ctrl->sensordata->slave_info.sensor_id =
+ probe_info->expected_data;
+ s_ctrl->sensordata->slave_info.sensor_id_mask =
+ probe_info->data_mask;
+ /* Userspace passes the pipeline delay in reserved field */
+ s_ctrl->pipeline_delay =
+ probe_info->reserved;
+
+ s_ctrl->sensor_probe_addr_type = probe_info->addr_type;
+ s_ctrl->sensor_probe_data_type = probe_info->data_type;
+ CAM_DBG(CAM_SENSOR,
+ "Sensor Addr: 0x%x sensor_id: 0x%x sensor_mask: 0x%x sensor_pipeline_delay:0x%x",
+ s_ctrl->sensordata->slave_info.sensor_id_reg_addr,
+ s_ctrl->sensordata->slave_info.sensor_id,
+ s_ctrl->sensordata->slave_info.sensor_id_mask,
+ s_ctrl->pipeline_delay);
+ return rc;
+}
+
+/* Handle one probe command buffer by index: buffer 0 carries the i2c
+ * info immediately followed by the probe info; buffer 1 carries the
+ * power-up/power-down settings.
+ *
+ * Returns 0 on success or a negative error from the update helpers.
+ */
+int32_t cam_handle_cmd_buffers_for_probe(void *cmd_buf,
+ struct cam_sensor_ctrl_t *s_ctrl,
+ int32_t cmd_buf_num, int cmd_buf_length)
+{
+ int32_t rc = 0;
+
+ switch (cmd_buf_num) {
+ case 0: {
+ struct cam_cmd_i2c_info *i2c_info = NULL;
+ struct cam_cmd_probe *probe_info;
+
+ i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+ rc = cam_sensor_update_i2c_info(i2c_info, s_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Failed in Updating the i2c Info");
+ return rc;
+ }
+ /* probe_info sits right after the i2c_info in the buffer */
+ probe_info = (struct cam_cmd_probe *)
+ (cmd_buf + sizeof(struct cam_cmd_i2c_info));
+ rc = cam_sensor_update_slave_info(probe_info, s_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Updating the slave Info");
+ return rc;
+ }
+ /* NOTE(review): this assignment has no effect — cmd_buf is
+  * a by-value parameter and is not read again afterwards. */
+ cmd_buf = probe_info;
+ }
+ break;
+ case 1: {
+ rc = cam_sensor_update_power_settings(cmd_buf,
+ cmd_buf_length, &s_ctrl->sensordata->power_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Failed in updating power settings");
+ return rc;
+ }
+ }
+ break;
+ default:
+ /* Unknown buffer index is logged but not treated as fatal. */
+ CAM_ERR(CAM_SENSOR, "Invalid command buffer");
+ break;
+ }
+ return rc;
+}
+
+/* Resolve a probe packet mem handle, walk its command buffer
+ * descriptors (exactly two are required), and feed each mapped buffer
+ * to cam_handle_cmd_buffers_for_probe().
+ *
+ * Returns 0 on success, -EINVAL on any mapping/validation failure.
+ */
+int32_t cam_handle_mem_ptr(uint64_t handle, struct cam_sensor_ctrl_t *s_ctrl)
+{
+ int rc = 0, i;
+ uint32_t *cmd_buf;
+ void *ptr;
+ size_t len;
+ struct cam_packet *pkt;
+ struct cam_cmd_buf_desc *cmd_desc;
+ uintptr_t cmd_buf1 = 0;
+ uintptr_t packet = 0;
+
+ rc = cam_mem_get_cpu_buf(handle,
+ &packet, &len);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Failed to get the command Buffer");
+ return -EINVAL;
+ }
+ pkt = (struct cam_packet *)packet;
+ cmd_desc = (struct cam_cmd_buf_desc *)
+ ((uint32_t *)&pkt->payload + pkt->cmd_buf_offset/4);
+ /* NOTE(review): this check can never fire — pointer arithmetic on
+  * a mapped buffer does not yield NULL; cmd_buf_offset itself is
+  * not range-checked here. */
+ if (cmd_desc == NULL) {
+ CAM_ERR(CAM_SENSOR, "command descriptor pos is invalid");
+ return -EINVAL;
+ }
+ /* Probe packets must carry exactly two command buffers
+  * (i2c/probe info + power settings). */
+ if (pkt->num_cmd_buf != 2) {
+ CAM_ERR(CAM_SENSOR, "Expected More Command Buffers : %d",
+ pkt->num_cmd_buf);
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->num_cmd_buf; i++) {
+ if (!(cmd_desc[i].length))
+ continue;
+ rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+ &cmd_buf1, &len);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Failed to parse the command Buffer Header");
+ return -EINVAL;
+ }
+ cmd_buf = (uint32_t *)cmd_buf1;
+ cmd_buf += cmd_desc[i].offset/4;
+ ptr = (void *) cmd_buf;
+
+ rc = cam_handle_cmd_buffers_for_probe(ptr, s_ctrl,
+ i, cmd_desc[i].length);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Failed to parse the command Buffer Header");
+ return -EINVAL;
+ }
+ }
+ return rc;
+}
+
+/* Fill a cam_sensor_query_cap from the sensor's static data: mounting
+ * orientation, the sub-module slot ids discovered at probe, and the SoC
+ * slot index. secure_camera is hard-coded to 0 (not supported here).
+ */
+void cam_sensor_query_cap(struct cam_sensor_ctrl_t *s_ctrl,
+ struct cam_sensor_query_cap *query_cap)
+{
+ query_cap->pos_roll = s_ctrl->sensordata->pos_roll;
+ query_cap->pos_pitch = s_ctrl->sensordata->pos_pitch;
+ query_cap->pos_yaw = s_ctrl->sensordata->pos_yaw;
+ query_cap->secure_camera = 0;
+ query_cap->actuator_slot_id =
+ s_ctrl->sensordata->subdev_id[SUB_MODULE_ACTUATOR];
+ query_cap->csiphy_slot_id =
+ s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY];
+ query_cap->eeprom_slot_id =
+ s_ctrl->sensordata->subdev_id[SUB_MODULE_EEPROM];
+ query_cap->flash_slot_id =
+ s_ctrl->sensordata->subdev_id[SUB_MODULE_LED_FLASH];
+ query_cap->ois_slot_id =
+ s_ctrl->sensordata->subdev_id[SUB_MODULE_OIS];
+ query_cap->slot_info =
+ s_ctrl->soc_info.index;
+}
+
+/* Extract the sensor id field from a raw chip id read: apply the
+ * configured id mask, then shift the result right by the number of
+ * trailing zero bits in the mask so the field is right-aligned.
+ * A zero mask means "no mask" and is replaced with all-ones.
+ *
+ * NOTE(review): the mask is narrowed to int16_t here, which truncates
+ * any mask wider than 16 bits — confirm masks never exceed 0xFFFF.
+ */
+static uint16_t cam_sensor_id_by_mask(struct cam_sensor_ctrl_t *s_ctrl,
+ uint32_t chipid)
+{
+ uint16_t sensor_id = (uint16_t)(chipid & 0xFFFF);
+ int16_t sensor_id_mask = s_ctrl->sensordata->slave_info.sensor_id_mask;
+
+ if (!sensor_id_mask)
+ sensor_id_mask = ~sensor_id_mask;
+
+ sensor_id &= sensor_id_mask;
+ /* Isolate the lowest set bit, minus one = trailing-zero mask. */
+ sensor_id_mask &= -sensor_id_mask;
+ sensor_id_mask -= 1;
+ while (sensor_id_mask) {
+ sensor_id_mask >>= 1;
+ sensor_id >>= 1;
+ }
+ return sensor_id;
+}
+
+/* Forcefully tear down the sensor: release cached stream/per-frame
+ * settings, power down, destroy the bridge device handle, free power
+ * settings, and reset all bookkeeping back to CAM_SENSOR_INIT.
+ * No-op when the sensor never probed and is still in INIT.
+ */
+void cam_sensor_shutdown(struct cam_sensor_ctrl_t *s_ctrl)
+{
+ struct cam_sensor_power_ctrl_t *power_info =
+ &s_ctrl->sensordata->power_info;
+ int rc = 0;
+
+ if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) &&
+ (s_ctrl->is_probe_succeed == 0))
+ return;
+
+ cam_sensor_release_stream_rsc(s_ctrl);
+ cam_sensor_release_per_frame_resource(s_ctrl);
+ cam_sensor_power_down(s_ctrl);
+
+ rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
+ if (rc < 0)
+ CAM_ERR(CAM_SENSOR, " failed destroying dhdl");
+ s_ctrl->bridge_intf.device_hdl = -1;
+ s_ctrl->bridge_intf.link_hdl = -1;
+ s_ctrl->bridge_intf.session_hdl = -1;
+ /* kfree(NULL) is safe; clear pointers so shutdown is re-entrant. */
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
+ power_info->power_setting_size = 0;
+ power_info->power_down_setting_size = 0;
+ s_ctrl->streamon_count = 0;
+ s_ctrl->streamoff_count = 0;
+ s_ctrl->is_probe_succeed = 0;
+ s_ctrl->last_flush_req = 0;
+ s_ctrl->sensor_state = CAM_SENSOR_INIT;
+}
+
+/* Read the chip id register over I2C and compare it (after masking via
+ * cam_sensor_id_by_mask) with the expected sensor id from slave_info.
+ *
+ * Returns 0 on match, -EINVAL on missing sensor data, -ENODEV on
+ * mismatch, or the negative I2C error.
+ */
+int cam_sensor_match_id(struct cam_sensor_ctrl_t *s_ctrl)
+{
+ int rc = 0;
+ uint32_t chipid = 0;
+ struct cam_camera_slave_info *slave_info;
+
+ /* Fix: the original tested &(s_ctrl->sensordata->slave_info),
+  * an address-of expression that can never be NULL; validate the
+  * enclosing sensordata pointer instead. */
+ if (!s_ctrl->sensordata) {
+ CAM_ERR(CAM_SENSOR, " failed: %pK",
+ s_ctrl->sensordata);
+ return -EINVAL;
+ }
+
+ slave_info = &(s_ctrl->sensordata->slave_info);
+
+ rc = camera_io_dev_read(
+ &(s_ctrl->io_master_info),
+ slave_info->sensor_id_reg_addr,
+ &chipid, CAMERA_SENSOR_I2C_TYPE_WORD,
+ CAMERA_SENSOR_I2C_TYPE_WORD);
+ /* Fix: the read result was ignored; a failed read would compare
+  * a stale/zero chipid and report a misleading mismatch. */
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Failed to read sensor id: %d", rc);
+ return rc;
+ }
+
+ CAM_DBG(CAM_SENSOR, "read id: 0x%x expected id 0x%x:",
+ chipid, slave_info->sensor_id);
+ if (cam_sensor_id_by_mask(s_ctrl, chipid) != slave_info->sensor_id) {
+ CAM_ERR(CAM_SENSOR, "chip id %x does not match %x",
+ chipid, slave_info->sensor_id);
+ return -ENODEV;
+ }
+ return rc;
+}
+
+int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
+ void *arg)
+{
+ int rc = 0;
+ struct cam_control *cmd = (struct cam_control *)arg;
+ struct cam_sensor_power_ctrl_t *power_info =
+ &s_ctrl->sensordata->power_info;
+ if (!s_ctrl || !arg) {
+ CAM_ERR(CAM_SENSOR, "s_ctrl is NULL");
+ return -EINVAL;
+ }
+
+ if (cmd->op_code != CAM_SENSOR_PROBE_CMD) {
+ if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+ CAM_ERR(CAM_SENSOR, "Invalid handle type: %d",
+ cmd->handle_type);
+ return -EINVAL;
+ }
+ }
+
+ mutex_lock(&(s_ctrl->cam_sensor_mutex));
+ switch (cmd->op_code) {
+ case CAM_SENSOR_PROBE_CMD: {
+ if (s_ctrl->is_probe_succeed == 1) {
+ CAM_ERR(CAM_SENSOR,
+ "Already Sensor Probed in the slot");
+ break;
+ }
+
+ if (cmd->handle_type ==
+ CAM_HANDLE_MEM_HANDLE) {
+ rc = cam_handle_mem_ptr(cmd->handle, s_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Get Buffer Handle Failed");
+ goto release_mutex;
+ }
+ } else {
+ CAM_ERR(CAM_SENSOR, "Invalid Command Type: %d",
+ cmd->handle_type);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+
+ /* Parse and fill vreg params for powerup settings */
+ rc = msm_camera_fill_vreg_params(
+ &s_ctrl->soc_info,
+ s_ctrl->sensordata->power_info.power_setting,
+ s_ctrl->sensordata->power_info.power_setting_size);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Fail in filling vreg params for PUP rc %d",
+ rc);
+ goto free_power_settings;
+ }
+
+ /* Parse and fill vreg params for powerdown settings*/
+ rc = msm_camera_fill_vreg_params(
+ &s_ctrl->soc_info,
+ s_ctrl->sensordata->power_info.power_down_setting,
+ s_ctrl->sensordata->power_info.power_down_setting_size);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Fail in filling vreg params for PDOWN rc %d",
+ rc);
+ goto free_power_settings;
+ }
+
+ /* Power up and probe sensor */
+ rc = cam_sensor_power_up(s_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "power up failed");
+ goto free_power_settings;
+ }
+
+ /* Match sensor ID */
+ rc = cam_sensor_match_id(s_ctrl);
+ if (rc < 0) {
+ cam_sensor_power_down(s_ctrl);
+ msleep(20);
+ goto free_power_settings;
+ }
+
+ CAM_INFO(CAM_SENSOR,
+ "Probe success,slot:%d,slave_addr:0x%x,sensor_id:0x%x",
+ s_ctrl->soc_info.index,
+ s_ctrl->sensordata->slave_info.sensor_slave_addr,
+ s_ctrl->sensordata->slave_info.sensor_id);
+
+ rc = cam_sensor_power_down(s_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "fail in Sensor Power Down");
+ goto free_power_settings;
+ }
+ /*
+ * Set probe succeeded flag to 1 so that no other camera shall
+ * probed on this slot
+ */
+ s_ctrl->is_probe_succeed = 1;
+ s_ctrl->sensor_state = CAM_SENSOR_INIT;
+ }
+ break;
+ case CAM_ACQUIRE_DEV: {
+ struct cam_sensor_acquire_dev sensor_acq_dev;
+ struct cam_create_dev_hdl bridge_params;
+
+ if ((s_ctrl->is_probe_succeed == 0) ||
+ (s_ctrl->sensor_state != CAM_SENSOR_INIT)) {
+ CAM_WARN(CAM_SENSOR,
+ "Not in right state to aquire %d",
+ s_ctrl->sensor_state);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+
+ if (s_ctrl->bridge_intf.device_hdl != -1) {
+ CAM_ERR(CAM_SENSOR, "Device is already acquired");
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+ rc = copy_from_user(&sensor_acq_dev,
+ u64_to_user_ptr(cmd->handle),
+ sizeof(sensor_acq_dev));
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Failed Copying from user");
+ goto release_mutex;
+ }
+
+ bridge_params.session_hdl = sensor_acq_dev.session_handle;
+ bridge_params.ops = &s_ctrl->bridge_intf.ops;
+ bridge_params.v4l2_sub_dev_flag = 0;
+ bridge_params.media_entity_flag = 0;
+ bridge_params.priv = s_ctrl;
+
+ sensor_acq_dev.device_handle =
+ cam_create_device_hdl(&bridge_params);
+ s_ctrl->bridge_intf.device_hdl = sensor_acq_dev.device_handle;
+ s_ctrl->bridge_intf.session_hdl = sensor_acq_dev.session_handle;
+
+ CAM_DBG(CAM_SENSOR, "Device Handle: %d",
+ sensor_acq_dev.device_handle);
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
+ &sensor_acq_dev,
+ sizeof(struct cam_sensor_acquire_dev))) {
+ CAM_ERR(CAM_SENSOR, "Failed Copy to User");
+ rc = -EFAULT;
+ goto release_mutex;
+ }
+
+ rc = cam_sensor_power_up(s_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Sensor Power up failed");
+ goto release_mutex;
+ }
+
+ s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
+ s_ctrl->last_flush_req = 0;
+ CAM_INFO(CAM_SENSOR,
+ "CAM_ACQUIRE_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x",
+ s_ctrl->sensordata->slave_info.sensor_id,
+ s_ctrl->sensordata->slave_info.sensor_slave_addr);
+ }
+ break;
+ case CAM_RELEASE_DEV: {
+ if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+ (s_ctrl->sensor_state == CAM_SENSOR_START)) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_SENSOR,
+ "Not in right state to release : %d",
+ s_ctrl->sensor_state);
+ goto release_mutex;
+ }
+
+ if (s_ctrl->bridge_intf.link_hdl != -1) {
+ CAM_ERR(CAM_SENSOR,
+ "Device [%d] still active on link 0x%x",
+ s_ctrl->sensor_state,
+ s_ctrl->bridge_intf.link_hdl);
+ rc = -EAGAIN;
+ goto release_mutex;
+ }
+
+ rc = cam_sensor_power_down(s_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Sensor Power Down failed");
+ goto release_mutex;
+ }
+
+ cam_sensor_release_per_frame_resource(s_ctrl);
+ cam_sensor_release_stream_rsc(s_ctrl);
+ if (s_ctrl->bridge_intf.device_hdl == -1) {
+ CAM_ERR(CAM_SENSOR,
+ "Invalid Handles: link hdl: %d device hdl: %d",
+ s_ctrl->bridge_intf.device_hdl,
+ s_ctrl->bridge_intf.link_hdl);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+ rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
+ if (rc < 0)
+ CAM_ERR(CAM_SENSOR,
+ "failed in destroying the device hdl");
+ s_ctrl->bridge_intf.device_hdl = -1;
+ s_ctrl->bridge_intf.link_hdl = -1;
+ s_ctrl->bridge_intf.session_hdl = -1;
+
+ s_ctrl->sensor_state = CAM_SENSOR_INIT;
+ CAM_INFO(CAM_SENSOR,
+ "CAM_RELEASE_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x",
+ s_ctrl->sensordata->slave_info.sensor_id,
+ s_ctrl->sensordata->slave_info.sensor_slave_addr);
+ s_ctrl->streamon_count = 0;
+ s_ctrl->streamoff_count = 0;
+ s_ctrl->last_flush_req = 0;
+ }
+ break;
+ case CAM_QUERY_CAP: {
+ struct cam_sensor_query_cap sensor_cap;
+
+ cam_sensor_query_cap(s_ctrl, &sensor_cap);
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
+ &sensor_cap, sizeof(struct cam_sensor_query_cap))) {
+ CAM_ERR(CAM_SENSOR, "Failed Copy to User");
+ rc = -EFAULT;
+ goto release_mutex;
+ }
+ break;
+ }
+ case CAM_START_DEV: {
+ if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+ (s_ctrl->sensor_state == CAM_SENSOR_START)) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_SENSOR,
+ "Not in right state to start : %d",
+ s_ctrl->sensor_state);
+ goto release_mutex;
+ }
+
+ if (s_ctrl->i2c_data.streamon_settings.is_settings_valid &&
+ (s_ctrl->i2c_data.streamon_settings.request_id == 0)) {
+ rc = cam_sensor_apply_settings(s_ctrl, 0,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "cannot apply streamon settings");
+ goto release_mutex;
+ }
+ }
+ s_ctrl->sensor_state = CAM_SENSOR_START;
+ CAM_INFO(CAM_SENSOR,
+ "CAM_START_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x",
+ s_ctrl->sensordata->slave_info.sensor_id,
+ s_ctrl->sensordata->slave_info.sensor_slave_addr);
+ }
+ break;
+ case CAM_STOP_DEV: {
+ if (s_ctrl->sensor_state != CAM_SENSOR_START) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_SENSOR,
+ "Not in right state to stop : %d",
+ s_ctrl->sensor_state);
+ goto release_mutex;
+ }
+
+ if (s_ctrl->i2c_data.streamoff_settings.is_settings_valid &&
+ (s_ctrl->i2c_data.streamoff_settings.request_id == 0)) {
+ rc = cam_sensor_apply_settings(s_ctrl, 0,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "cannot apply streamoff settings");
+ }
+ }
+
+ cam_sensor_release_per_frame_resource(s_ctrl);
+ s_ctrl->last_flush_req = 0;
+ s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
+ CAM_INFO(CAM_SENSOR,
+ "CAM_STOP_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x",
+ s_ctrl->sensordata->slave_info.sensor_id,
+ s_ctrl->sensordata->slave_info.sensor_slave_addr);
+ }
+ break;
+ case CAM_CONFIG_DEV: {
+ rc = cam_sensor_i2c_pkt_parse(s_ctrl, arg);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Failed CCI Config: %d", rc);
+ goto release_mutex;
+ }
+ if (s_ctrl->i2c_data.init_settings.is_settings_valid &&
+ (s_ctrl->i2c_data.init_settings.request_id == 0)) {
+
+ rc = cam_sensor_apply_settings(s_ctrl, 0,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "cannot apply init settings");
+ goto release_mutex;
+ }
+ rc = delete_request(&s_ctrl->i2c_data.init_settings);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Fail in deleting the Init settings");
+ goto release_mutex;
+ }
+ s_ctrl->i2c_data.init_settings.request_id = -1;
+ }
+
+ if (s_ctrl->i2c_data.config_settings.is_settings_valid &&
+ (s_ctrl->i2c_data.config_settings.request_id == 0)) {
+ rc = cam_sensor_apply_settings(s_ctrl, 0,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "cannot apply config settings");
+ goto release_mutex;
+ }
+ rc = delete_request(&s_ctrl->i2c_data.config_settings);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Fail in deleting the config settings");
+ goto release_mutex;
+ }
+ s_ctrl->sensor_state = CAM_SENSOR_CONFIG;
+ s_ctrl->i2c_data.config_settings.request_id = -1;
+ }
+ }
+ break;
+ default:
+ CAM_ERR(CAM_SENSOR, "Invalid Opcode: %d", cmd->op_code);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+
+release_mutex:
+ mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+ return rc;
+
+free_power_settings:
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
+ power_info->power_down_setting_size = 0;
+ power_info->power_setting_size = 0;
+ mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+ return rc;
+}
+
+/* CRM get_dev_info callback: report this sensor's identity and timing. */
+int cam_sensor_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+	struct cam_sensor_ctrl_t *s_ctrl;
+
+	if (!info)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(info->dev_hdl);
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	info->dev_id = CAM_REQ_MGR_DEVICE_SENSOR;
+	strlcpy(info->name, CAM_SENSOR_NAME, sizeof(info->name));
+	/* Advertise the configured pipeline delay only in the 1..3 range */
+	info->p_delay = (s_ctrl->pipeline_delay >= 1 &&
+		s_ctrl->pipeline_delay <= 3) ?
+		s_ctrl->pipeline_delay : 2;
+	info->trigger = CAM_TRIGGER_POINT_SOF;
+
+	return 0;
+}
+
+/* CRM link_setup callback: record or clear the request-manager link. */
+int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
+{
+	struct cam_sensor_ctrl_t *s_ctrl;
+
+	if (!link)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(link->dev_hdl);
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	/* Update the bridge state under the sensor mutex */
+	mutex_lock(&s_ctrl->cam_sensor_mutex);
+	s_ctrl->bridge_intf.link_hdl =
+		link->link_enable ? link->link_hdl : -1;
+	s_ctrl->bridge_intf.crm_cb =
+		link->link_enable ? link->crm_cb : NULL;
+	mutex_unlock(&s_ctrl->cam_sensor_mutex);
+
+	return 0;
+}
+
+/* V4L2 s_power hook: power the sensor down on off while streaming. */
+int cam_sensor_power(struct v4l2_subdev *sd, int on)
+{
+	struct cam_sensor_ctrl_t *s_ctrl = v4l2_get_subdevdata(sd);
+	bool power_off_needed;
+
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	power_off_needed = !on && (s_ctrl->sensor_state == CAM_SENSOR_START);
+	if (power_off_needed) {
+		cam_sensor_power_down(s_ctrl);
+		s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
+	}
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+
+	return 0;
+}
+
+/*
+ * Power up the sensor: BoB regulator PWM mode (best effort), core
+ * power rails, then the I/O (CCI/I2C) master.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int rc;
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_camera_slave_info *slave_info;
+	struct cam_hw_soc_info *soc_info;
+
+	/*
+	 * Validate s_ctrl before touching it: the previous code computed
+	 * &s_ctrl->soc_info in the declarator, i.e. before this check.
+	 */
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "failed: %pK", s_ctrl);
+		return -EINVAL;
+	}
+
+	soc_info = &s_ctrl->soc_info;
+	power_info = &s_ctrl->sensordata->power_info;
+	slave_info = &(s_ctrl->sensordata->slave_info);
+
+	if (!power_info || !slave_info) {
+		CAM_ERR(CAM_SENSOR, "failed: %pK %pK", power_info, slave_info);
+		return -EINVAL;
+	}
+
+	/* BoB regulator PWM-mode switch is best effort: warn, don't fail */
+	if (s_ctrl->bob_pwm_switch) {
+		rc = cam_sensor_bob_pwm_mode_switch(soc_info,
+			s_ctrl->bob_reg_index, true);
+		if (rc) {
+			CAM_WARN(CAM_SENSOR,
+				"BoB PWM setup failed rc: %d", rc);
+			rc = 0;
+		}
+	}
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "power up the core is failed:%d", rc);
+		return rc;
+	}
+
+	/* Bring up the I/O master only after the rails are on */
+	rc = camera_io_init(&(s_ctrl->io_master_info));
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "cci_init failed: rc: %d", rc);
+
+	return rc;
+}
+
+/*
+ * Power down the sensor: core rails first, then restore the BoB
+ * regulator mode (best effort) and release the I/O master.
+ */
+int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int ret = 0;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_sensor_power_ctrl_t *power_info;
+
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "failed: s_ctrl %pK", s_ctrl);
+		return -EINVAL;
+	}
+
+	soc_info = &s_ctrl->soc_info;
+	power_info = &s_ctrl->sensordata->power_info;
+	if (!power_info) {
+		CAM_ERR(CAM_SENSOR, "failed: power_info %pK", power_info);
+		return -EINVAL;
+	}
+
+	ret = cam_sensor_util_power_down(power_info, soc_info);
+	if (ret < 0) {
+		CAM_ERR(CAM_SENSOR, "power down the core is failed:%d", ret);
+		return ret;
+	}
+
+	/* Leave PWM mode again; failure only warns */
+	if (s_ctrl->bob_pwm_switch) {
+		ret = cam_sensor_bob_pwm_mode_switch(soc_info,
+			s_ctrl->bob_reg_index, false);
+		if (ret) {
+			CAM_WARN(CAM_SENSOR,
+				"BoB PWM setup failed rc: %d", ret);
+			ret = 0;
+		}
+	}
+
+	camera_io_release(&(s_ctrl->io_master_info));
+
+	return ret;
+}
+
+/*
+ * Apply I2C register settings to the sensor.
+ *
+ * req_id == 0 selects one of the non-per-frame setting blocks
+ * (streamon/init/config/streamoff) based on @opcode and writes it out.
+ * A non-zero req_id applies the matching per-frame settings, then
+ * garbage-collects per-frame entries that can no longer be applied.
+ *
+ * Returns 0 on success, or a negative error code from the I2C writes.
+ */
+int cam_sensor_apply_settings(struct cam_sensor_ctrl_t *s_ctrl,
+	int64_t req_id, enum cam_sensor_packet_opcodes opcode)
+{
+	int rc = 0, offset, i;
+	uint64_t top = 0, del_req_id = 0;
+	struct i2c_settings_array *i2c_set = NULL;
+	struct i2c_settings_list *i2c_list;
+
+	if (req_id == 0) {
+		/* Select the settings block matching the opcode */
+		switch (opcode) {
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON: {
+			i2c_set = &s_ctrl->i2c_data.streamon_settings;
+			break;
+		}
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG: {
+			i2c_set = &s_ctrl->i2c_data.init_settings;
+			break;
+		}
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG: {
+			i2c_set = &s_ctrl->i2c_data.config_settings;
+			break;
+		}
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF: {
+			i2c_set = &s_ctrl->i2c_data.streamoff_settings;
+			break;
+		}
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE:
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE:
+		default:
+			/* Nothing to apply for these opcodes at req_id 0 */
+			return 0;
+		}
+		if (i2c_set->is_settings_valid == 1) {
+			list_for_each_entry(i2c_list,
+				&(i2c_set->list_head), list) {
+				rc = cam_sensor_i2c_modes_util(
+					&(s_ctrl->io_master_info),
+					i2c_list);
+				if (rc < 0) {
+					CAM_ERR(CAM_SENSOR,
+						"Failed to apply settings: %d",
+						rc);
+					return rc;
+				}
+			}
+		}
+	} else {
+		/* Per-frame settings live in a ring indexed by request id */
+		offset = req_id % MAX_PER_FRAME_ARRAY;
+		i2c_set = &(s_ctrl->i2c_data.per_frame[offset]);
+		if (i2c_set->is_settings_valid == 1 &&
+			i2c_set->request_id == req_id) {
+			list_for_each_entry(i2c_list,
+				&(i2c_set->list_head), list) {
+				rc = cam_sensor_i2c_modes_util(
+					&(s_ctrl->io_master_info),
+					i2c_list);
+				if (rc < 0) {
+					CAM_ERR(CAM_SENSOR,
+						"Failed to apply settings: %d",
+						rc);
+					return rc;
+				}
+			}
+		} else {
+			CAM_DBG(CAM_SENSOR,
+				"Invalid/NOP request to apply: %lld", req_id);
+		}
+
+		/* Change the logic dynamically */
+		/*
+		 * Find "top": the newest valid request id not newer than
+		 * req_id; del_req_id trails it as a deletion watermark.
+		 */
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			if ((req_id >=
+				s_ctrl->i2c_data.per_frame[i].request_id) &&
+				(top <
+				s_ctrl->i2c_data.per_frame[i].request_id) &&
+				(s_ctrl->i2c_data.per_frame[i].
+				is_settings_valid == 1)) {
+				del_req_id = top;
+				top = s_ctrl->i2c_data.per_frame[i].request_id;
+			}
+		}
+
+		if (top < req_id) {
+			/*
+			 * When top and req_id land >= BATCH_SIZE_MAX ring
+			 * slots apart, everything below req_id is stale —
+			 * raise the deletion watermark to req_id.
+			 */
+			if ((((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) ||
+				(((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX))
+				del_req_id = req_id;
+		}
+
+		if (!del_req_id)
+			return rc;
+
+		CAM_DBG(CAM_SENSOR, "top: %llu, del_req_id:%llu",
+			top, del_req_id);
+
+		/* Free every valid per-frame entry older than del_req_id */
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			if ((del_req_id >
+				s_ctrl->i2c_data.per_frame[i].request_id) && (
+				s_ctrl->i2c_data.per_frame[i].is_settings_valid
+				== 1)) {
+				s_ctrl->i2c_data.per_frame[i].request_id = 0;
+				rc = delete_request(
+					&(s_ctrl->i2c_data.per_frame[i]));
+				if (rc < 0)
+					CAM_ERR(CAM_SENSOR,
+						"Delete request Fail:%lld rc:%d",
+						del_req_id, rc);
+			}
+		}
+	}
+
+	return rc;
+}
+
+/* CRM apply_req callback: apply the per-frame settings for a request. */
+int32_t cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+	struct cam_sensor_ctrl_t *s_ctrl;
+	int32_t rc;
+
+	if (!apply)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(apply->dev_hdl);
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_REQ, " Sensor update req id: %lld", apply->request_id);
+	trace_cam_apply_req("Sensor", apply->request_id);
+
+	/* Settings are applied under the sensor control mutex */
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	rc = cam_sensor_apply_settings(s_ctrl, apply->request_id,
+		CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE);
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+
+	return rc;
+}
+
+/*
+ * CRM flush_req callback: drop pending per-frame I2C settings.
+ *
+ * FLUSH_TYPE_ALL records the flush watermark (last_flush_req) and
+ * deletes every valid per-frame entry; FLUSH_TYPE_CANCEL_REQ deletes
+ * only the entry whose request id matches flush_req->req_id.
+ */
+int32_t cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush_req)
+{
+	int32_t rc = 0, i;
+	uint32_t cancel_req_id_found = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+	struct i2c_settings_array *i2c_set = NULL;
+
+	if (!flush_req)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(flush_req->dev_hdl);
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (s_ctrl->i2c_data.per_frame == NULL) {
+		CAM_ERR(CAM_SENSOR, "i2c frame data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		/* Remember the newest flushed id for later comparisons */
+		s_ctrl->last_flush_req = flush_req->req_id;
+		CAM_DBG(CAM_SENSOR, "last reqest to flush is %lld",
+			flush_req->req_id);
+	}
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+		i2c_set = &(s_ctrl->i2c_data.per_frame[i]);
+
+		/* For CANCEL_REQ only the matching request id is touched */
+		if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
+			&& (i2c_set->request_id != flush_req->req_id))
+			continue;
+
+		if (i2c_set->is_settings_valid == 1) {
+			rc = delete_request(i2c_set);
+			if (rc < 0)
+				CAM_ERR(CAM_SENSOR,
+					"delete request: %lld rc: %d",
+					i2c_set->request_id, rc);
+
+			if (flush_req->type ==
+				CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+				cancel_req_id_found = 1;
+				break;
+			}
+		}
+	}
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+		!cancel_req_id_found)
+		CAM_DBG(CAM_SENSOR,
+			"Flush request id:%lld not found in the pending list",
+			flush_req->req_id);
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_core.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_core.h
new file mode 100644
index 000000000000..373d6976a0ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_core.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_CORE_H_
+#define _CAM_SENSOR_CORE_H_
+
+#include "cam_sensor_dev.h"
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * This API powers up the camera sensor module
+ *
+ * @return Zero on success, negative error code otherwise
+ */
+int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * This API powers down the camera sensor module
+ *
+ * @return Zero on success, negative error code otherwise
+ */
+int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl);
+
+/**
+ * @sd: V4L2 subdevice
+ * @on: Turn off/on flag
+ *
+ * V4L2 s_power hook; powers the sensor down when @on is zero and the
+ * sensor is currently streaming
+ *
+ * @return Zero
+ */
+int cam_sensor_power(struct v4l2_subdev *sd, int on);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ * @req_id: Request id
+ * @opcode: opcode for settings
+ *
+ * This API applies the req_id settings to sensor
+ *
+ * @return Zero on success, negative error code otherwise
+ */
+int cam_sensor_apply_settings(struct cam_sensor_ctrl_t *s_ctrl, int64_t req_id,
+	enum cam_sensor_packet_opcodes opcode);
+
+/**
+ * @apply: Req mgr structure for applying request
+ *
+ * This API applies the request that is mentioned
+ *
+ * @return Zero on success, negative error code otherwise
+ */
+int cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply);
+
+/**
+ * @flush: Req mgr structure for flushing request
+ *
+ * This API flushes the request that is mentioned
+ *
+ * @return Zero on success, negative error code otherwise
+ */
+int cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush);
+
+/**
+ * @info: Sub device info to req mgr
+ *
+ * Publish the subdevice info
+ *
+ * @return Zero on success, negative error code otherwise
+ */
+int cam_sensor_publish_dev_info(struct cam_req_mgr_device_info *info);
+
+/**
+ * @link: Link setup info
+ *
+ * This API establishes link with sensor subdevice with req mgr
+ *
+ * @return Zero on success, negative error code otherwise
+ */
+int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ * @arg: Camera control command argument
+ *
+ * This API handles the camera control argument reached to sensor
+ *
+ * @return Zero on success, negative error code otherwise
+ */
+int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl, void *arg);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * This API handles the camera sensor close/shutdown
+ */
+void cam_sensor_shutdown(struct cam_sensor_ctrl_t *s_ctrl);
+
+#endif /* _CAM_SENSOR_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_dev.c
new file mode 100644
index 000000000000..8dcb6c73312f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_dev.c
@@ -0,0 +1,392 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_sensor_soc.h"
+#include "cam_sensor_core.h"
+
+/* Native (non-compat) subdev ioctl entry point. */
+static long cam_sensor_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	struct cam_sensor_ctrl_t *s_ctrl = v4l2_get_subdevdata(sd);
+	int rc;
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_sensor_driver_cmd(s_ctrl, arg);
+		break;
+	default:
+		CAM_ERR(CAM_SENSOR, "Invalid ioctl cmd: %d", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/* Subdev .close hook: shut the sensor down under the control mutex. */
+static int cam_sensor_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_sensor_ctrl_t *s_ctrl = v4l2_get_subdevdata(sd);
+
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "s_ctrl ptr is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	cam_sensor_shutdown(s_ctrl);
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit compat ioctl shim: bounce cam_control through kernel space. */
+static long cam_sensor_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	int32_t rc = 0;
+	struct cam_control cmd_data;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_SENSOR, "Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_sensor_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc < 0)
+			CAM_ERR(CAM_SENSOR, "cam_sensor_subdev_ioctl failed");
+		break;
+	default:
+		CAM_ERR(CAM_SENSOR, "Invalid compat ioctl cmd_type: %d", cmd);
+		rc = -EINVAL;
+	}
+
+	/* Mirror the (possibly updated) command block back on success */
+	if (!rc && copy_to_user((void __user *)arg, &cmd_data,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_SENSOR,
+			"Failed to copy to user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+
+#endif
+/* Core subdev ops: VIDIOC_CAM_CONTROL ioctl, compat shim, s_power hook */
+static struct v4l2_subdev_core_ops cam_sensor_subdev_core_ops = {
+	.ioctl = cam_sensor_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_sensor_init_subdev_do_ioctl,
+#endif
+	.s_power = cam_sensor_power,
+};
+
+static struct v4l2_subdev_ops cam_sensor_subdev_ops = {
+	.core = &cam_sensor_subdev_core_ops,
+};
+
+/* .close shuts the sensor down when the subdev node is closed */
+static const struct v4l2_subdev_internal_ops cam_sensor_internal_ops = {
+	.close = cam_sensor_subdev_close,
+};
+
+/* Populate the cam_subdev descriptor and register it with the core. */
+static int cam_sensor_init_subdev_params(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	struct cam_subdev *sd = &s_ctrl->v4l2_dev_str;
+	int rc;
+
+	sd->internal_ops = &cam_sensor_internal_ops;
+	sd->ops = &cam_sensor_subdev_ops;
+	strlcpy(s_ctrl->device_name, CAMX_SENSOR_DEV_NAME,
+		sizeof(s_ctrl->device_name));
+	sd->name = s_ctrl->device_name;
+	sd->sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	sd->ent_function = CAM_SENSOR_DEVICE_TYPE;
+	sd->token = s_ctrl;
+
+	rc = cam_register_subdev(sd);
+	if (rc)
+		CAM_ERR(CAM_SENSOR, "Fail with cam_register_subdev rc: %d", rc);
+
+	return rc;
+}
+
+/*
+ * I2C probe: allocate the control structure, parse DT, register the
+ * subdev and wire up the CRM bridge callbacks for an I2C-attached
+ * sensor.
+ */
+static int32_t cam_sensor_driver_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int32_t rc = 0;
+	int i = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_SENSOR,
+			"%s :i2c_check_functionality failed", client->name);
+		return -EFAULT;
+	}
+
+	/* Create sensor control structure */
+	s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL);
+	if (!s_ctrl)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, s_ctrl);
+
+	s_ctrl->io_master_info.client = client;
+	soc_info = &s_ctrl->soc_info;
+	soc_info->dev = &client->dev;
+	soc_info->dev_name = client->name;
+
+	/* Initialize sensor device type */
+	s_ctrl->of_node = client->dev.of_node;
+	s_ctrl->io_master_info.master_type = I2C_MASTER;
+	s_ctrl->is_probe_succeed = 0;
+	s_ctrl->last_flush_req = 0;
+
+	rc = cam_sensor_parse_dt(s_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "cam_sensor_parse_dt rc %d", rc);
+		goto free_s_ctrl;
+	}
+
+	rc = cam_sensor_init_subdev_params(s_ctrl);
+	if (rc)
+		goto free_s_ctrl;
+
+	/*
+	 * kcalloc zero-initializes and guards the element-count
+	 * multiplication against overflow, unlike the open-coded
+	 * kzalloc(a * b) form (which checkpatch also flags).
+	 */
+	s_ctrl->i2c_data.per_frame = kcalloc(MAX_PER_FRAME_ARRAY,
+		sizeof(struct i2c_settings_array), GFP_KERNEL);
+	if (s_ctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto unreg_subdev;
+	}
+
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.init_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.config_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamon_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamoff_settings.list_head));
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(s_ctrl->i2c_data.per_frame[i].list_head));
+
+	/* Hook this sensor into the request-manager bridge interface */
+	s_ctrl->bridge_intf.device_hdl = -1;
+	s_ctrl->bridge_intf.link_hdl = -1;
+	s_ctrl->bridge_intf.ops.get_dev_info = cam_sensor_publish_dev_info;
+	s_ctrl->bridge_intf.ops.link_setup = cam_sensor_establish_link;
+	s_ctrl->bridge_intf.ops.apply_req = cam_sensor_apply_request;
+	s_ctrl->bridge_intf.ops.flush_req = cam_sensor_flush_request;
+
+	s_ctrl->sensordata->power_info.dev = soc_info->dev;
+	v4l2_set_subdevdata(&(s_ctrl->v4l2_dev_str.sd), s_ctrl);
+	return rc;
+unreg_subdev:
+	cam_unregister_subdev(&(s_ctrl->v4l2_dev_str));
+free_s_ctrl:
+	kfree(s_ctrl);
+	return rc;
+}
+
+/*
+ * Platform remove: drop clock references, then free the per-frame
+ * settings array and the devm-allocated control structure.
+ *
+ * NOTE(review): the subdev registered in probe is not unregistered
+ * here and s_ctrl->sensordata is not freed — confirm these are
+ * released on another path (e.g. shutdown).
+ */
+static int cam_sensor_platform_remove(struct platform_device *pdev)
+{
+	int i;
+	struct cam_sensor_ctrl_t *s_ctrl;
+	struct cam_hw_soc_info *soc_info;
+
+	s_ctrl = platform_get_drvdata(pdev);
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "sensor device is NULL");
+		return 0;
+	}
+
+	soc_info = &s_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	kfree(s_ctrl->i2c_data.per_frame);
+	devm_kfree(&pdev->dev, s_ctrl);
+
+	return 0;
+}
+
+/*
+ * I2C remove: mirror of the platform remove path for I2C-attached
+ * sensors; the control structure here was kzalloc'd, so plain kfree.
+ *
+ * NOTE(review): as with platform remove, the registered subdev is not
+ * unregistered here — confirm teardown happens elsewhere.
+ */
+static int cam_sensor_driver_i2c_remove(struct i2c_client *client)
+{
+	int i;
+	struct cam_sensor_ctrl_t *s_ctrl = i2c_get_clientdata(client);
+	struct cam_hw_soc_info *soc_info;
+
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "sensor device is NULL");
+		return 0;
+	}
+
+	soc_info = &s_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	kfree(s_ctrl->i2c_data.per_frame);
+	kfree(s_ctrl);
+
+	return 0;
+}
+
+/* Device-tree match table for platform (CCI-attached) sensors */
+static const struct of_device_id cam_sensor_driver_dt_match[] = {
+	{.compatible = "qcom,cam-sensor"},
+	{}
+};
+
+/*
+ * Platform probe: allocate the control structure, parse DT, register
+ * the subdev and wire up the CRM bridge callbacks for a CCI-attached
+ * sensor.
+ */
+static int32_t cam_sensor_driver_platform_probe(
+	struct platform_device *pdev)
+{
+	int32_t rc = 0, i = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+
+	/* Create sensor control structure */
+	s_ctrl = devm_kzalloc(&pdev->dev,
+		sizeof(struct cam_sensor_ctrl_t), GFP_KERNEL);
+	if (!s_ctrl)
+		return -ENOMEM;
+
+	soc_info = &s_ctrl->soc_info;
+	soc_info->pdev = pdev;
+	soc_info->dev = &pdev->dev;
+	soc_info->dev_name = pdev->name;
+
+	/* Initialize sensor device type */
+	s_ctrl->of_node = pdev->dev.of_node;
+	s_ctrl->is_probe_succeed = 0;
+	s_ctrl->last_flush_req = 0;
+
+	/*fill in platform device*/
+	s_ctrl->pdev = pdev;
+
+	s_ctrl->io_master_info.master_type = CCI_MASTER;
+
+	rc = cam_sensor_parse_dt(s_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed: cam_sensor_parse_dt rc %d", rc);
+		goto free_s_ctrl;
+	}
+
+	/* Fill platform device id*/
+	pdev->id = soc_info->index;
+
+	rc = cam_sensor_init_subdev_params(s_ctrl);
+	if (rc)
+		goto free_s_ctrl;
+
+	/*
+	 * kcalloc zero-initializes and guards the element-count
+	 * multiplication against overflow, unlike the open-coded
+	 * kzalloc(a * b) form (which checkpatch also flags).
+	 */
+	s_ctrl->i2c_data.per_frame = kcalloc(MAX_PER_FRAME_ARRAY,
+		sizeof(struct i2c_settings_array), GFP_KERNEL);
+	if (s_ctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto unreg_subdev;
+	}
+
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.init_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.config_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamon_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamoff_settings.list_head));
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(s_ctrl->i2c_data.per_frame[i].list_head));
+
+	/* Hook this sensor into the request-manager bridge interface */
+	s_ctrl->bridge_intf.device_hdl = -1;
+	s_ctrl->bridge_intf.link_hdl = -1;
+	s_ctrl->bridge_intf.ops.get_dev_info = cam_sensor_publish_dev_info;
+	s_ctrl->bridge_intf.ops.link_setup = cam_sensor_establish_link;
+	s_ctrl->bridge_intf.ops.apply_req = cam_sensor_apply_request;
+	s_ctrl->bridge_intf.ops.flush_req = cam_sensor_flush_request;
+
+	s_ctrl->sensordata->power_info.dev = &pdev->dev;
+	platform_set_drvdata(pdev, s_ctrl);
+	v4l2_set_subdevdata(&(s_ctrl->v4l2_dev_str.sd), s_ctrl);
+
+	s_ctrl->sensor_state = CAM_SENSOR_INIT;
+
+	return rc;
+unreg_subdev:
+	cam_unregister_subdev(&(s_ctrl->v4l2_dev_str));
+free_s_ctrl:
+	devm_kfree(&pdev->dev, s_ctrl);
+	return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_sensor_driver_dt_match);
+
+/* Driver for platform (CCI-attached) camera sensors */
+static struct platform_driver cam_sensor_platform_driver = {
+	.probe = cam_sensor_driver_platform_probe,
+	.driver = {
+		.name = "qcom,camera",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_sensor_driver_dt_match,
+		.suppress_bind_attrs = true,
+	},
+	.remove = cam_sensor_platform_remove,
+};
+
+/* I2C id table: matches devices named SENSOR_DRIVER_I2C */
+static const struct i2c_device_id i2c_id[] = {
+	{SENSOR_DRIVER_I2C, (kernel_ulong_t)NULL},
+	{ }
+};
+
+/* Driver for directly I2C-attached camera sensors */
+static struct i2c_driver cam_sensor_driver_i2c = {
+	.id_table = i2c_id,
+	.probe = cam_sensor_driver_i2c_probe,
+	.remove = cam_sensor_driver_i2c_remove,
+	.driver = {
+		.name = SENSOR_DRIVER_I2C,
+	},
+};
+
+/*
+ * Module init: register the platform and I2C sensor drivers.
+ *
+ * If the I2C driver fails to register, the already-registered
+ * platform driver is unregistered so a failed init leaves no
+ * driver registered (the original left it dangling).
+ */
+static int __init cam_sensor_driver_init(void)
+{
+	int32_t rc = 0;
+
+	rc = platform_driver_register(&cam_sensor_platform_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "platform_driver_register Failed: rc = %d",
+			rc);
+		return rc;
+	}
+
+	rc = i2c_add_driver(&cam_sensor_driver_i2c);
+	if (rc) {
+		CAM_ERR(CAM_SENSOR, "i2c_add_driver failed rc = %d", rc);
+		platform_driver_unregister(&cam_sensor_platform_driver);
+	}
+
+	return rc;
+}
+
+/* Module exit: unregister both drivers registered in init */
+static void __exit cam_sensor_driver_exit(void)
+{
+	platform_driver_unregister(&cam_sensor_platform_driver);
+	i2c_del_driver(&cam_sensor_driver_i2c);
+}
+
+module_init(cam_sensor_driver_init);
+module_exit(cam_sensor_driver_exit);
+MODULE_DESCRIPTION("cam_sensor_driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_dev.h
new file mode 100644
index 000000000000..e98a592dd5ea
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_DEV_H_
+#define _CAM_SENSOR_DEV_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_cci_dev.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_subdev.h>
+#include <cam_sensor_io.h>
+#include "cam_debug_util.h"
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE 1
+#define FALSE 0
+
+/* CDBG: error level when CAM_SENSOR_DEBUG is defined, debug otherwise */
+#undef CDBG
+#ifdef CAM_SENSOR_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define SENSOR_DRIVER_I2C "i2c_camera"
+#define CAMX_SENSOR_DEV_NAME "cam-sensor-driver"
+
+/* Sensor life-cycle states: INIT -> ACQUIRE -> CONFIG -> START */
+enum cam_sensor_state_t {
+	CAM_SENSOR_INIT,
+	CAM_SENSOR_ACQUIRE,
+	CAM_SENSOR_CONFIG,
+	CAM_SENSOR_START,
+};
+
+/**
+ * struct intf_params
+ * @device_hdl: Device Handle
+ * @session_hdl: Session Handle
+ * @link_hdl: Link Handle
+ * @ops: KMD operations
+ * @crm_cb: Callback API pointers
+ */
+struct intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_sensor_ctrl_t: Camera control structure
+ * @pdev: Platform device
+ * @soc_info: SoC resource information (device, clocks, cell index)
+ * @cam_sensor_mutex: Sensor mutex
+ * @sensordata: Sensor board Information
+ * @cci_i2c_master: CCI I2C master index
+ * @cci_num: CCI hardware instance index (assumed — confirm with CCI driver)
+ * @io_master_info: Information about the communication master
+ * @sensor_state: Sensor states
+ * @is_probe_succeed: Probe succeeded or not
+ * @id: Cell Index
+ * @of_node: Of node ptr
+ * @v4l2_dev_str: V4L2 device structure
+ * @sensor_probe_addr_type: Sensor probe address type
+ * @sensor_probe_data_type: Sensor probe data type
+ * @i2c_data: Sensor I2C register settings
+ * @sensor_info: Sensor query cap structure
+ * @bridge_intf: Bridge interface structure
+ * @device_name: Sensor device structure
+ * @streamon_count: Count to hold the number of times stream on called
+ * @streamoff_count: Count to hold the number of times stream off called
+ * @bob_reg_index: Hold to BoB regulator index
+ * @bob_pwm_switch: Boolean flag to switch into PWM mode for BoB regulator
+ * @last_flush_req: Last request to flush
+ * @pipeline_delay: Sensor pipeline delay
+ */
+struct cam_sensor_ctrl_t {
+	struct platform_device *pdev;
+	struct cam_hw_soc_info soc_info;
+	struct mutex cam_sensor_mutex;
+	struct cam_sensor_board_info *sensordata;
+	enum cci_i2c_master_t cci_i2c_master;
+	enum cci_device_num cci_num;
+	struct camera_io_master io_master_info;
+	enum cam_sensor_state_t sensor_state;
+	uint8_t is_probe_succeed;
+	uint32_t id;
+	struct device_node *of_node;
+	struct cam_subdev v4l2_dev_str;
+	uint8_t sensor_probe_addr_type;
+	uint8_t sensor_probe_data_type;
+	struct i2c_data_settings i2c_data;
+	struct cam_sensor_query_cap sensor_info;
+	struct intf_params bridge_intf;
+	char device_name[20];
+	uint32_t streamon_count;
+	uint32_t streamoff_count;
+	int bob_reg_index;
+	bool bob_pwm_switch;
+	uint32_t last_flush_req;
+	uint16_t pipeline_delay;
+};
+
+#endif /* _CAM_SENSOR_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_soc.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_soc.c
new file mode 100644
index 000000000000..3ea03cfd6c22
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_soc.c
@@ -0,0 +1,293 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+#include "cam_sensor_soc.h"
+#include "cam_soc_util.h"
+
+/*
+ * cam_sensor_get_sub_module_index - resolve sub-module cell indices from DT
+ * @of_node: sensor device-tree node
+ * @s_info:  board info whose subdev_id[] table is filled in
+ *
+ * Every subdev_id[] slot is first reset to -1 (absent). Each optional
+ * phandle source ("actuator-src", "ois-src", "eeprom-src",
+ * "led-flash-src") that exists must carry a readable "cell-index";
+ * a missing phandle is not an error, a present one with an unreadable
+ * cell-index is. "csiphy-sd-index" is read last directly from of_node.
+ *
+ * Returns 0 on success, or the negative of_property_read_u32() error.
+ */
+int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
+ struct cam_sensor_board_info *s_info)
+{
+ int rc = 0, i = 0;
+ uint32_t val = 0;
+ struct device_node *src_node = NULL;
+ struct cam_sensor_board_info *sensor_info = s_info;
+ /* Table collapses four identical phandle/cell-index parse blocks. */
+ static const struct {
+  const char *prop_name;
+  int sub_module;
+ } src_props[] = {
+  { "actuator-src", SUB_MODULE_ACTUATOR },
+  { "ois-src", SUB_MODULE_OIS },
+  { "eeprom-src", SUB_MODULE_EEPROM },
+  { "led-flash-src", SUB_MODULE_LED_FLASH },
+ };
+
+ for (i = 0; i < SUB_MODULE_MAX; i++)
+  sensor_info->subdev_id[i] = -1;
+
+ for (i = 0; i < ARRAY_SIZE(src_props); i++) {
+  src_node = of_parse_phandle(of_node, src_props[i].prop_name, 0);
+  if (!src_node) {
+   /* Optional sub-module not wired up in DT; skip it. */
+   CAM_DBG(CAM_SENSOR, "%s src_node NULL",
+    src_props[i].prop_name);
+   continue;
+  }
+  rc = of_property_read_u32(src_node, "cell-index", &val);
+  CAM_DBG(CAM_SENSOR, "%s cell index %d, rc %d",
+   src_props[i].prop_name, val, rc);
+  if (rc < 0) {
+   CAM_ERR(CAM_SENSOR, "failed %d", rc);
+   of_node_put(src_node);
+   return rc;
+  }
+  sensor_info->subdev_id[src_props[i].sub_module] = val;
+  of_node_put(src_node);
+ }
+
+ rc = of_property_read_u32(of_node, "csiphy-sd-index", &val);
+ if (rc < 0)
+  CAM_ERR(CAM_SENSOR, "paring the dt node for csiphy rc %d", rc);
+ else
+  sensor_info->subdev_id[SUB_MODULE_CSIPHY] = val;
+
+ return rc;
+}
+
+/*
+ * cam_sensor_driver_get_dt_data - parse DT properties into s_ctrl
+ *
+ * Allocates s_ctrl->sensordata and fills it from the sensor DT node:
+ * SoC properties, GPIO table, cell index, optional BoB regulator,
+ * sub-module indices, CCI master/device numbers and mount angles.
+ *
+ * Fix vs. original: on any failure the allocation is freed AND
+ * s_ctrl->sensordata is reset to NULL, so callers can never follow a
+ * dangling pointer after an error return.
+ */
+static int32_t cam_sensor_driver_get_dt_data(struct cam_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ int i = 0;
+ struct cam_sensor_board_info *sensordata = NULL;
+ struct device_node *of_node = s_ctrl->of_node;
+ struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info;
+
+ s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL);
+ if (!s_ctrl->sensordata)
+  return -ENOMEM;
+
+ sensordata = s_ctrl->sensordata;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc < 0) {
+  CAM_ERR(CAM_SENSOR, "Failed to read DT properties rc %d", rc);
+  goto FREE_SENSOR_DATA;
+ }
+
+ rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+  &sensordata->power_info.gpio_num_info);
+ if (rc < 0) {
+  CAM_ERR(CAM_SENSOR, "Failed to read gpios %d", rc);
+  goto FREE_SENSOR_DATA;
+ }
+
+ s_ctrl->id = soc_info->index;
+
+ /* Validate cell_id */
+ if (s_ctrl->id >= MAX_CAMERAS) {
+  CAM_ERR(CAM_SENSOR, "Failed invalid cell_id %d", s_ctrl->id);
+  rc = -EINVAL;
+  goto FREE_SENSOR_DATA;
+ }
+
+ /* Store the index of BoB regulator if it is available */
+ for (i = 0; i < soc_info->num_rgltr; i++) {
+  if (!strcmp(soc_info->rgltr_name[i],
+   "cam_bob")) {
+   CAM_DBG(CAM_SENSOR,
+    "i: %d cam_bob", i);
+   s_ctrl->bob_reg_index = i;
+   soc_info->rgltr[i] = devm_regulator_get(soc_info->dev,
+    soc_info->rgltr_name[i]);
+   if (IS_ERR_OR_NULL(soc_info->rgltr[i])) {
+    /* BoB is optional: warn and carry on without it. */
+    CAM_WARN(CAM_SENSOR,
+     "Regulator: %s get failed",
+     soc_info->rgltr_name[i]);
+    soc_info->rgltr[i] = NULL;
+   } else {
+    if (!of_property_read_bool(of_node,
+     "pwm-switch")) {
+     CAM_DBG(CAM_SENSOR,
+      "No BoB PWM switch param defined");
+     s_ctrl->bob_pwm_switch = false;
+    } else {
+     s_ctrl->bob_pwm_switch = true;
+    }
+   }
+  }
+ }
+
+ /* Read subdev info */
+ rc = cam_sensor_get_sub_module_index(of_node, sensordata);
+ if (rc < 0) {
+  CAM_ERR(CAM_SENSOR, "failed to get sub module index, rc=%d",
+   rc);
+  goto FREE_SENSOR_DATA;
+ }
+
+ if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
+  /*
+   * Get CCI master. NOTE(review): reads a u32 straight into the
+   * enum fields; relies on enum having u32 representation here.
+   */
+  rc = of_property_read_u32(of_node, "cci-master",
+   &s_ctrl->cci_i2c_master);
+  CAM_DBG(CAM_SENSOR, "cci-master %d, rc %d",
+   s_ctrl->cci_i2c_master, rc);
+  if (rc < 0) {
+   /* Set default master 0 */
+   s_ctrl->cci_i2c_master = MASTER_0;
+   rc = 0;
+  }
+
+  rc = of_property_read_u32(of_node, "cci-device",
+   &s_ctrl->cci_num);
+  CAM_DBG(CAM_SENSOR, "cci-device %d, rc %d",
+   s_ctrl->cci_num, rc);
+  if (rc < 0) {
+   /* Set default device 0 */
+   s_ctrl->cci_num = CCI_DEVICE_0;
+   rc = 0;
+  }
+ }
+
+ /* Mount angles default to 360 (= "unknown") when absent in DT. */
+ if (of_property_read_u32(of_node, "sensor-position-pitch",
+  &sensordata->pos_pitch) < 0) {
+  CAM_DBG(CAM_SENSOR, "Invalid sensor position");
+  sensordata->pos_pitch = 360;
+ }
+ if (of_property_read_u32(of_node, "sensor-position-roll",
+  &sensordata->pos_roll) < 0) {
+  CAM_DBG(CAM_SENSOR, "Invalid sensor position");
+  sensordata->pos_roll = 360;
+ }
+ if (of_property_read_u32(of_node, "sensor-position-yaw",
+  &sensordata->pos_yaw) < 0) {
+  CAM_DBG(CAM_SENSOR, "Invalid sensor position");
+  sensordata->pos_yaw = 360;
+ }
+
+ return rc;
+
+FREE_SENSOR_DATA:
+ kfree(sensordata);
+ s_ctrl->sensordata = NULL;
+ return rc;
+}
+
+/*
+ * msm_sensor_init_default_params - set up the I/O master client
+ *
+ * For a CCI master, allocates the cci_client and records the CCI device
+ * number; for a QUP I2C master, only verifies a client exists. Any other
+ * master type is rejected.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on bad
+ * arguments or unsupported master type.
+ */
+int32_t msm_sensor_init_default_params(struct cam_sensor_ctrl_t *s_ctrl)
+{
+ if (!s_ctrl) {
+  CAM_ERR(CAM_SENSOR, "failed: invalid params s_ctrl %pK",
+   s_ctrl);
+  return -EINVAL;
+ }
+
+ CAM_DBG(CAM_SENSOR,
+  "master_type: %d", s_ctrl->io_master_info.master_type);
+
+ switch (s_ctrl->io_master_info.master_type) {
+ case CCI_MASTER:
+  s_ctrl->io_master_info.cci_client = kzalloc(
+   sizeof(struct cam_sensor_cci_client), GFP_KERNEL);
+  if (!(s_ctrl->io_master_info.cci_client))
+   return -ENOMEM;
+  s_ctrl->io_master_info.cci_client->cci_device =
+   s_ctrl->cci_num;
+  return 0;
+ case I2C_MASTER:
+  if (!(s_ctrl->io_master_info.client))
+   return -EINVAL;
+  return 0;
+ default:
+  CAM_ERR(CAM_SENSOR,
+   "Invalid master / Master type Not supported");
+  return -EINVAL;
+ }
+}
+
+/*
+ * cam_sensor_parse_dt - parse sensor DT and initialize defaults
+ *
+ * Reads DT data into s_ctrl, initializes the sensor mutex, acquires all
+ * clocks named in DT and sets up the I/O master defaults.
+ *
+ * Fixes vs. original:
+ *  - devm_clk_get() returns ERR_PTR() on failure, never NULL, so the
+ *    old `!soc_info->clk[i]` check could not detect errors; use
+ *    IS_ERR_OR_NULL() instead.
+ *  - a clock failure previously returned directly, leaking the
+ *    sensordata allocation; it now goes through FREE_DT_DATA.
+ */
+int32_t cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl)
+{
+ int32_t i, rc = 0;
+ struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info;
+
+ /* Parse dt information and store in sensor control structure */
+ rc = cam_sensor_driver_get_dt_data(s_ctrl);
+ if (rc < 0) {
+  CAM_ERR(CAM_SENSOR, "Failed to get dt data rc %d", rc);
+  return rc;
+ }
+
+ /* Initialize mutex */
+ mutex_init(&(s_ctrl->cam_sensor_mutex));
+
+ /* Acquire every clock listed in DT before touching the sensor. */
+ for (i = 0; i < soc_info->num_clk; i++) {
+  soc_info->clk[i] = devm_clk_get(soc_info->dev,
+   soc_info->clk_name[i]);
+  if (IS_ERR_OR_NULL(soc_info->clk[i])) {
+   CAM_ERR(CAM_SENSOR, "get failed for %s",
+    soc_info->clk_name[i]);
+   soc_info->clk[i] = NULL;
+   rc = -ENOENT;
+   goto FREE_DT_DATA;
+  }
+ }
+
+ rc = msm_sensor_init_default_params(s_ctrl);
+ if (rc < 0) {
+  CAM_ERR(CAM_SENSOR,
+   "failed: msm_sensor_init_default_params rc %d", rc);
+  goto FREE_DT_DATA;
+ }
+
+ return rc;
+
+FREE_DT_DATA:
+ kfree(s_ctrl->sensordata);
+ s_ctrl->sensordata = NULL;
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_soc.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_soc.h
new file mode 100644
index 000000000000..502645ccecbd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor/cam_sensor_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_SOC_H_
+#define _CAM_SENSOR_SOC_H_
+
+#include "cam_sensor_dev.h"
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * Parses sensor dt
+ */
+int cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl);
+
+#endif /* _CAM_SENSOR_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/Makefile
new file mode 100644
index 000000000000..324ab7b1891d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o cam_sensor_spi.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
new file mode 100644
index 000000000000..1ec7d9a018c8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -0,0 +1,239 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_sensor_i2c.h"
+#include "cam_cci_dev.h"
+
+/*
+ * cam_cci_i2c_read - single register read over CCI
+ *
+ * Reads data_type bytes from @addr via the CCI subdev and assembles
+ * them big-endian into *@data.
+ *
+ * Fix vs. original: the buffer was a VLA `buf[data_type]` sized from
+ * the still-unvalidated enum BEFORE the range check ran, which is
+ * undefined behavior for out-of-range values; a fixed-size buffer
+ * large enough for every valid data_type is used instead.
+ *
+ * Returns the CCI status (0 on success) or a negative error code.
+ */
+int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *cci_client,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type)
+{
+ int32_t rc = -EINVAL;
+ unsigned char buf[CAMERA_SENSOR_I2C_TYPE_MAX];
+ struct cam_cci_ctrl cci_ctrl;
+
+ if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+  || addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+  || data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+  || data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+  return rc;
+
+ cci_ctrl.cmd = MSM_CCI_I2C_READ;
+ cci_ctrl.cci_info = cci_client;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;
+ rc = v4l2_subdev_call(cci_client->cci_subdev,
+  core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+  CAM_ERR(CAM_SENSOR, "rc = %d", rc);
+  return rc;
+ }
+
+ rc = cci_ctrl.status;
+ /* Assemble big-endian: first byte read is the most significant. */
+ if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+  *data = buf[0];
+ else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
+  *data = buf[0] << 8 | buf[1];
+ else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+  *data = buf[0] << 16 | buf[1] << 8 | buf[2];
+ else
+  *data = buf[0] << 24 | buf[1] << 16 |
+   buf[2] << 8 | buf[3];
+
+ return rc;
+}
+
+/*
+ * cam_camera_cci_i2c_read_seq - sequential read of num_byte bytes over CCI
+ *
+ * Fixes vs. original:
+ *  - enum arguments are now also checked against the INVALID lower
+ *    bound, matching cam_cci_i2c_read();
+ *  - the bounce buffer is only copied into @data when the transfer
+ *    actually succeeded; previously zeroed garbage was copied out on
+ *    failure.
+ *
+ * Returns the CCI status (0 on success) or a negative error code.
+ */
+int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *cci_client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
+ uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ unsigned char *buf = NULL;
+ int i = 0;
+ struct cam_cci_ctrl cci_ctrl;
+
+ if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+  || (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+  || (data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+  || (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+  || (num_byte > I2C_REG_DATA_MAX)) {
+  CAM_ERR(CAM_SENSOR, "addr_type %d num_byte %d", addr_type,
+   num_byte);
+  return rc;
+ }
+
+ buf = kzalloc(num_byte, GFP_KERNEL);
+ if (!buf)
+  return -ENOMEM;
+
+ cci_ctrl.cmd = MSM_CCI_I2C_READ;
+ cci_ctrl.cci_info = cci_client;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
+ cci_ctrl.status = -EFAULT;
+ rc = v4l2_subdev_call(cci_client->cci_subdev,
+  core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc >= 0)
+  rc = cci_ctrl.status;
+ CAM_DBG(CAM_SENSOR, "addr = 0x%x, rc = %d", addr, rc);
+ if (rc < 0)
+  goto free_buf;
+
+ for (i = 0; i < num_byte; i++) {
+  data[i] = buf[i];
+  CAM_DBG(CAM_SENSOR, "Byte %d: Data: 0x%x\n", i, data[i]);
+ }
+
+free_buf:
+ kfree(buf);
+ return rc;
+}
+
+/*
+ * cam_cci_i2c_write_table_cmd - issue a CCI write of the given cmd type
+ *
+ * Validates the write_setting enums, builds a cam_cci_ctrl and forwards
+ * it to the CCI subdev; honors write_setting->delay afterwards (msleep
+ * above 20 ms, usleep_range otherwise).
+ *
+ * Fix vs. original: cci_ctrl is now zero-initialized. It was passed to
+ * the subdev ioctl with several fields (including .status, which is
+ * read back as the return value) uninitialized on the stack.
+ */
+static int32_t cam_cci_i2c_write_table_cmd(
+ struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting,
+ enum cam_cci_cmd_type cmd)
+{
+ int32_t rc = -EINVAL;
+ struct cam_cci_ctrl cci_ctrl = {0};
+
+ if (!client || !write_setting)
+  return rc;
+
+ if (write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+  || write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+  || write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+  || write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+  return rc;
+
+ cci_ctrl.cmd = cmd;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting =
+  write_setting->reg_setting;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = write_setting->data_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = write_setting->addr_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = write_setting->size;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+  core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+  CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc);
+  return rc;
+ }
+ rc = cci_ctrl.status;
+ /* Post-write settle delay requested by the register table. */
+ if (write_setting->delay > 20)
+  msleep(write_setting->delay);
+ else if (write_setting->delay)
+  usleep_range(write_setting->delay * 1000, (write_setting->delay
+   * 1000) + 1000);
+
+ return rc;
+}
+
+
+/* Random-address register write: thin wrapper issuing MSM_CCI_I2C_WRITE. */
+int32_t cam_cci_i2c_write_table(
+ struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting)
+{
+ return cam_cci_i2c_write_table_cmd(client, write_setting,
+  MSM_CCI_I2C_WRITE);
+}
+
+/*
+ * Continuous register write: flag 1 selects burst mode, flag 0 selects
+ * sequential mode; any other flag value is a silent no-op (returns 0),
+ * matching the original behavior.
+ */
+int32_t cam_cci_i2c_write_continuous_table(
+ struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting,
+ uint8_t cam_sensor_i2c_write_flag)
+{
+ enum cam_cci_cmd_type cmd;
+
+ if (cam_sensor_i2c_write_flag == 1)
+  cmd = MSM_CCI_I2C_WRITE_BURST;
+ else if (cam_sensor_i2c_write_flag == 0)
+  cmd = MSM_CCI_I2C_WRITE_SEQ;
+ else
+  return 0;
+
+ return cam_cci_i2c_write_table_cmd(client, write_setting, cmd);
+}
+
+/*
+ * Read a register and compare it against @data with @data_mask bits
+ * excluded. Returns I2C_COMPARE_MATCH, I2C_COMPARE_MISMATCH, or a
+ * negative error from the read.
+ */
+static int32_t cam_cci_i2c_compare(struct cam_sensor_cci_client *client,
+ uint32_t addr, uint16_t data, uint16_t data_mask,
+ enum camera_sensor_i2c_type data_type,
+ enum camera_sensor_i2c_type addr_type)
+{
+ uint32_t reg_val = 0;
+ int32_t rc = cam_cci_i2c_read(client, addr, &reg_val,
+  addr_type, data_type);
+
+ if (rc < 0)
+  return rc;
+
+ /* Only the low 16 bits take part in the comparison. */
+ reg_val &= 0xFFFF;
+ return (data == (reg_val & ~data_mask)) ?
+  I2C_COMPARE_MATCH : I2C_COMPARE_MISMATCH;
+}
+
+/*
+ * cam_cci_i2c_poll - poll a register until it matches an expected value
+ *
+ * Compares roughly once per millisecond for up to delay_ms attempts.
+ * Returns 0 (I2C_COMPARE_MATCH) on match, 1 (I2C_COMPARE_MISMATCH,
+ * non-fatal) if the value never matched within the window, or a
+ * negative error if the underlying read failed.
+ */
+int32_t cam_cci_i2c_poll(struct cam_sensor_cci_client *client,
+ uint32_t addr, uint16_t data, uint16_t data_mask,
+ enum camera_sensor_i2c_type data_type,
+ enum camera_sensor_i2c_type addr_type,
+ uint32_t delay_ms)
+{
+ int32_t rc = -EINVAL;
+ int32_t i = 0;
+
+ CAM_DBG(CAM_SENSOR, "addr: 0x%x data: 0x%x dt: %d",
+  addr, data, data_type);
+
+ if (delay_ms > MAX_POLL_DELAY_MS) {
+  CAM_ERR(CAM_SENSOR, "invalid delay = %d max_delay = %d",
+   delay_ms, MAX_POLL_DELAY_MS);
+  return -EINVAL;
+ }
+ /* One compare per ~1 ms; bail out on the first match or read error. */
+ for (i = 0; i < delay_ms; i++) {
+  rc = cam_cci_i2c_compare(client,
+   addr, data, data_mask, data_type, addr_type);
+  if (!rc)
+   return rc;
+
+  usleep_range(1000, 1010);
+ }
+
+ /* If rc is 1 then read is successful but poll is failure */
+ if (rc == 1)
+  CAM_ERR(CAM_SENSOR, "poll failed rc=%d(non-fatal)", rc);
+
+ if (rc < 0)
+  CAM_ERR(CAM_SENSOR, "poll failed rc=%d", rc);
+
+ return rc;
+}
+
+/*
+ * cam_sensor_cci_i2c_util - send a bare CCI control command
+ * (e.g. MSM_CCI_INIT / MSM_CCI_RELEASE) to the CCI subdev.
+ *
+ * Returns the subdev status on success or a negative ioctl error.
+ * NOTE(review): only .cmd and .cci_info are initialized; the rest of
+ * cci_ctrl is stack garbage — assumes the CCI core ignores .cfg for
+ * these commands. Confirm against cam_cci_core.
+ */
+int32_t cam_sensor_cci_i2c_util(struct cam_sensor_cci_client *cci_client,
+ uint16_t cci_cmd)
+{
+ int32_t rc = 0;
+ struct cam_cci_ctrl cci_ctrl;
+
+ cci_ctrl.cmd = cci_cmd;
+ cci_ctrl.cci_info = cci_client;
+ rc = v4l2_subdev_call(cci_client->cci_subdev,
+  core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+  CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc);
+  return rc;
+ }
+ return cci_ctrl.status;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
new file mode 100644
index 000000000000..e79fffb49d77
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -0,0 +1,182 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_I2C_H_
+#define _CAM_SENSOR_I2C_H_
+
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_sensor.h>
+#include <media/cam_sensor.h>
+#include "cam_cci_dev.h"
+#include "cam_sensor_io.h"
+
+#define I2C_POLL_TIME_MS 5
+#define MAX_POLL_DELAY_MS 100
+
+#define I2C_COMPARE_MATCH 0
+#define I2C_COMPARE_MISMATCH 1
+
+#define I2C_REG_DATA_MAX (8*1024)
+
+/**
+ * @client: CCI client structure
+ * @data: I2C data
+ * @addr_type: I2c address type
+ * @data_type: I2C data type
+ *
+ * This API handles CCI read
+ */
+int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *client,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type);
+
+/**
+ * @client: CCI client structure
+ * @addr: I2c address
+ * @data: I2C data
+ * @addr_type: I2c address type
+ * @data_type: I2c data type
+ * @num_byte: number of bytes
+ *
+ * This API handles CCI sequential read
+ */
+int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
+ uint32_t num_byte);
+
+/**
+ * @client: CCI client structure
+ * @write_setting: I2C register setting
+ *
+ * This API handles CCI random write
+ */
+int32_t cam_cci_i2c_write_table(
+ struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting);
+
+/**
+ * @client: CCI client structure
+ * @write_setting: I2C register setting
+ * @cam_sensor_i2c_write_flag: burst or seq write
+ *
+ * This API handles CCI continuous write
+ */
+int32_t cam_cci_i2c_write_continuous_table(
+ struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting,
+ uint8_t cam_sensor_i2c_write_flag);
+
+/**
+ * @cci_client: CCI client structure
+ * @cci_cmd: CCI command type
+ *
+ * Does I2C call to I2C functionalities
+ */
+int32_t cam_sensor_cci_i2c_util(struct cam_sensor_cci_client *cci_client,
+ uint16_t cci_cmd);
+
+/**
+ * @client: CCI client structure
+ * @addr: I2C address
+ * @data: I2C data
+ * @data_mask: I2C data mask
+ * @data_type: I2C data type
+ * @addr_type: I2C addr type
+ * @delay_ms: Delay in milli seconds
+ *
+ * This API implements CCI based I2C poll
+ */
+int32_t cam_cci_i2c_poll(struct cam_sensor_cci_client *client,
+ uint32_t addr, uint16_t data, uint16_t data_mask,
+ enum camera_sensor_i2c_type data_type,
+ enum camera_sensor_i2c_type addr_type,
+ uint32_t delay_ms);
+
+
+/**
+ * cam_qup_i2c_read : QUP based i2c read
+ * @client : QUP I2C client structure
+ * @data : I2C data
+ * @addr_type : I2c address type
+ * @data_type : I2C data type
+ *
+ * This API handles QUP I2C read
+ */
+
+int32_t cam_qup_i2c_read(struct i2c_client *client,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type);
+
+/**
+ * cam_qup_i2c_read_seq : QUP based I2C sequential read
+ * @client : QUP I2C client structure
+ * @data : I2C data
+ * @addr_type : I2c address type
+ * @num_bytes : number of bytes to read
+ * This API handles QUP I2C Sequential read
+ */
+
+int32_t cam_qup_i2c_read_seq(struct i2c_client *client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ uint32_t num_byte);
+
+/**
+ * cam_qup_i2c_poll : QUP based I2C poll operation
+ * @client : QUP I2C client structure
+ * @addr : I2C address
+ * @data : I2C data
+ * @data_mask : I2C data mask
+ * @data_type : I2C data type
+ * @addr_type : I2C addr type
+ * @delay_ms : Delay in milli seconds
+ *
+ * This API implements QUP based I2C poll
+ */
+
+int32_t cam_qup_i2c_poll(struct i2c_client *client,
+ uint32_t addr, uint16_t data, uint16_t data_mask,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
+ uint32_t delay_ms);
+
+/**
+ * cam_qup_i2c_write_table : QUP based I2C write random
+ * @client : QUP I2C client structure
+ * @write_setting : I2C register settings
+ *
+ * This API handles QUP I2C random write
+ */
+
+int32_t cam_qup_i2c_write_table(
+ struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting);
+
+/**
+ * cam_qup_i2c_write_continuous_write: QUP based I2C write continuous(Burst/Seq)
+ * @client: QUP I2C client structure
+ * @write_setting: I2C register setting
+ * @cam_sensor_i2c_write_flag: burst or seq write
+ *
+ * This API handles QUP continuous write
+ */
+int32_t cam_qup_i2c_write_continuous_table(
+ struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting,
+ uint8_t cam_sensor_i2c_write_flag);
+
+#endif /*_CAM_SENSOR_I2C_H*/
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
new file mode 100644
index 000000000000..585e5516425f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -0,0 +1,191 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_io.h"
+#include "cam_sensor_i2c.h"
+
+/*
+ * camera_io_dev_poll - poll a register until it matches, via the
+ * configured master (CCI or QUP I2C; SPI is not supported here).
+ */
+int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
+ uint32_t addr, uint16_t data, uint32_t data_mask,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
+ uint32_t delay_ms)
+{
+ /*
+  * NOTE(review): the CCI path truncates the mask to the low 8 bits
+  * while the QUP path below receives the full data_mask — looks
+  * inconsistent (0xFFFF would match the uint16_t parameter of
+  * cam_cci_i2c_poll); confirm intent before changing.
+  */
+ int16_t mask = data_mask & 0xFF;
+
+ if (!io_master_info) {
+  CAM_ERR(CAM_SENSOR, "Invalid Args");
+  return -EINVAL;
+ }
+
+ if (io_master_info->master_type == CCI_MASTER) {
+  return cam_cci_i2c_poll(io_master_info->cci_client,
+   addr, data, mask, data_type, addr_type, delay_ms);
+ } else if (io_master_info->master_type == I2C_MASTER) {
+  /* NOTE(review): QUP takes (addr_type, data_type) in the
+   * opposite order to the CCI helper — both calls match their
+   * respective prototypes in cam_sensor_i2c.h. */
+  return cam_qup_i2c_poll(io_master_info->client,
+   addr, data, data_mask, addr_type, data_type,
+   delay_ms);
+ } else {
+  CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+   io_master_info->master_type);
+  return -EINVAL;
+ }
+}
+
+/*
+ * camera_io_dev_read - single register read dispatched on master type
+ * (CCI, QUP I2C or SPI). Returns the backend's result, or -EINVAL for
+ * a NULL master or unknown master type.
+ */
+int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type)
+{
+ if (!io_master_info) {
+  CAM_ERR(CAM_SENSOR, "Invalid Args");
+  return -EINVAL;
+ }
+
+ switch (io_master_info->master_type) {
+ case CCI_MASTER:
+  return cam_cci_i2c_read(io_master_info->cci_client,
+   addr, data, addr_type, data_type);
+ case I2C_MASTER:
+  return cam_qup_i2c_read(io_master_info->client,
+   addr, data, addr_type, data_type);
+ case SPI_MASTER:
+  return cam_spi_read(io_master_info,
+   addr, data, addr_type, data_type);
+ default:
+  CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+   io_master_info->master_type);
+  return -EINVAL;
+ }
+}
+
+/*
+ * camera_io_dev_read_seq - sequential read dispatched on master type.
+ *
+ * Fix vs. original: io_master_info is now NULL-checked before being
+ * dereferenced, matching every sibling camera_io_dev_* entry point.
+ */
+int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type, int32_t num_bytes)
+{
+ if (!io_master_info) {
+  CAM_ERR(CAM_SENSOR, "Invalid Args");
+  return -EINVAL;
+ }
+
+ if (io_master_info->master_type == CCI_MASTER) {
+  return cam_camera_cci_i2c_read_seq(io_master_info->cci_client,
+   addr, data, addr_type, data_type, num_bytes);
+ } else if (io_master_info->master_type == I2C_MASTER) {
+  /* QUP backend infers data width; data_type is not forwarded. */
+  return cam_qup_i2c_read_seq(io_master_info->client,
+   addr, data, addr_type, num_bytes);
+ } else if (io_master_info->master_type == SPI_MASTER) {
+  return cam_spi_read_seq(io_master_info,
+   addr, data, addr_type, num_bytes);
+ } else {
+  CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+   io_master_info->master_type);
+  return -EINVAL;
+ }
+}
+
+/*
+ * camera_io_dev_write - random register-table write dispatched on
+ * master type. Rejects NULL arguments and empty register settings.
+ */
+int32_t camera_io_dev_write(struct camera_io_master *io_master_info,
+ struct cam_sensor_i2c_reg_setting *write_setting)
+{
+ if (!write_setting || !io_master_info) {
+  CAM_ERR(CAM_SENSOR,
+   "Input parameters not valid ws: %pK ioinfo: %pK",
+   write_setting, io_master_info);
+  return -EINVAL;
+ }
+
+ if (!write_setting->reg_setting) {
+  CAM_ERR(CAM_SENSOR, "Invalid Register Settings");
+  return -EINVAL;
+ }
+
+ switch (io_master_info->master_type) {
+ case CCI_MASTER:
+  return cam_cci_i2c_write_table(io_master_info,
+   write_setting);
+ case I2C_MASTER:
+  return cam_qup_i2c_write_table(io_master_info,
+   write_setting);
+ case SPI_MASTER:
+  return cam_spi_write_table(io_master_info,
+   write_setting);
+ default:
+  CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+   io_master_info->master_type);
+  return -EINVAL;
+ }
+}
+
+/*
+ * camera_io_dev_write_continuous - burst/sequential write dispatched on
+ * master type. The SPI backend has no continuous mode, so the flag is
+ * ignored there and a plain table write is issued (as before).
+ */
+int32_t camera_io_dev_write_continuous(struct camera_io_master *io_master_info,
+ struct cam_sensor_i2c_reg_setting *write_setting,
+ uint8_t cam_sensor_i2c_write_flag)
+{
+ if (!write_setting || !io_master_info) {
+  CAM_ERR(CAM_SENSOR,
+   "Input parameters not valid ws: %pK ioinfo: %pK",
+   write_setting, io_master_info);
+  return -EINVAL;
+ }
+
+ if (!write_setting->reg_setting) {
+  CAM_ERR(CAM_SENSOR, "Invalid Register Settings");
+  return -EINVAL;
+ }
+
+ switch (io_master_info->master_type) {
+ case CCI_MASTER:
+  return cam_cci_i2c_write_continuous_table(io_master_info,
+   write_setting, cam_sensor_i2c_write_flag);
+ case I2C_MASTER:
+  return cam_qup_i2c_write_continuous_table(io_master_info,
+   write_setting, cam_sensor_i2c_write_flag);
+ case SPI_MASTER:
+  return cam_spi_write_table(io_master_info,
+   write_setting);
+ default:
+  CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+   io_master_info->master_type);
+  return -EINVAL;
+ }
+}
+
+/*
+ * camera_io_init - bring up the configured master. For CCI this looks
+ * up the CCI subdev and issues MSM_CCI_INIT; QUP I2C and SPI need no
+ * explicit bring-up and return 0.
+ */
+int32_t camera_io_init(struct camera_io_master *io_master_info)
+{
+ if (!io_master_info) {
+  CAM_ERR(CAM_SENSOR, "Invalid Args");
+  return -EINVAL;
+ }
+
+ switch (io_master_info->master_type) {
+ case CCI_MASTER:
+  io_master_info->cci_client->cci_subdev =
+   cam_cci_get_subdev(io_master_info->cci_client->cci_device);
+  return cam_sensor_cci_i2c_util(io_master_info->cci_client,
+   MSM_CCI_INIT);
+ case I2C_MASTER:
+ case SPI_MASTER:
+  return 0;
+ default:
+  return -EINVAL;
+ }
+}
+
+/*
+ * camera_io_release - tear down the configured master. For CCI this
+ * issues MSM_CCI_RELEASE; QUP I2C and SPI have nothing to release.
+ */
+int32_t camera_io_release(struct camera_io_master *io_master_info)
+{
+ if (!io_master_info) {
+  CAM_ERR(CAM_SENSOR, "Invalid Args");
+  return -EINVAL;
+ }
+
+ switch (io_master_info->master_type) {
+ case CCI_MASTER:
+  return cam_sensor_cci_i2c_util(io_master_info->cci_client,
+   MSM_CCI_RELEASE);
+ case I2C_MASTER:
+ case SPI_MASTER:
+  return 0;
+ default:
+  return -EINVAL;
+ }
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
new file mode 100644
index 000000000000..f1143c80dbe0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_IO_H_
+#define _CAM_SENSOR_IO_H_
+
+#include <media/cam_sensor.h>
+
+#include "cam_sensor_cmn_header.h"
+
+#define CCI_MASTER 1
+#define I2C_MASTER 2
+#define SPI_MASTER 3
+
+/**
+ * @master_type: CCI master type
+ * @client: I2C client information structure
+ * @cci_client: CCI client information structure
+ * @spi_client: SPI client information structure
+ */
+struct camera_io_master {
+ int master_type; /* CCI_MASTER, I2C_MASTER or SPI_MASTER */
+ struct i2c_client *client; /* used when master_type == I2C_MASTER */
+ struct cam_sensor_cci_client *cci_client; /* used for CCI_MASTER */
+ struct cam_sensor_spi_client *spi_client; /* used for SPI_MASTER */
+};
+
+/**
+ * @io_master_info: I2C/SPI master information
+ * @addr: I2C address
+ * @data: I2C data
+ * @addr_type: I2C addr_type
+ * @data_type: I2C data type
+ *
+ * This API abstracts read functionality based on master type
+ */
+int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ * @addr: I2C address
+ * @data: I2C data
+ * @addr_type: I2C addr type
+ * @data_type: I2C data type
+ * @num_bytes: number of bytes
+ *
+ * This API abstracts read functionality based on master type
+ */
+int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
+ int32_t num_bytes);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ *
+ * This API initializes the I2C/SPI master based on master type
+ */
+int32_t camera_io_init(struct camera_io_master *io_master_info);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ *
+ * This API releases the I2C/SPI master based on master type
+ */
+int32_t camera_io_release(struct camera_io_master *io_master_info);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ * @write_setting: write settings information
+ *
+ * This API abstracts write functionality based on master type
+ */
+int32_t camera_io_dev_write(struct camera_io_master *io_master_info,
+ struct cam_sensor_i2c_reg_setting *write_setting);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ * @write_setting: write settings information
+ * @cam_sensor_i2c_write_flag: differentiate between burst & seq
+ *
+ * This API abstracts write functionality based on master type and
+ * write flag for continuous write
+ */
+int32_t camera_io_dev_write_continuous(struct camera_io_master *io_master_info,
+ struct cam_sensor_i2c_reg_setting *write_setting,
+ uint8_t cam_sensor_i2c_write_flag);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ * @addr: I2C address
+ * @data: I2C data
+ * @data_mask: I2C data mask
+ * @data_type: I2C data type
+ * @addr_type: I2C address type
+ * @delay_ms: delay in milli seconds
+ *
+ * This API abstracts poll functionality based on master type
+ */
+int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
+ uint32_t addr, uint16_t data, uint32_t data_mask,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type,
+ uint32_t delay_ms);
+
+#include "cam_sensor_i2c.h"
+#include "cam_sensor_spi.h"
+#endif /* _CAM_SENSOR_IO_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
new file mode 100644
index 000000000000..9145a1e52f3e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
@@ -0,0 +1,537 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_sensor_i2c.h"
+#include "cam_sensor_io.h"
+
+#define I2C_REG_DATA_MAX (8*1024)
+#define I2C_REG_MAX_BUF_SIZE 8
+
+/*
+ * cam_qup_i2c_rxdata - register read over QUP I2C
+ * @dev_client:  I2C client; ->addr holds the 8-bit address (shifted down here)
+ * @rxdata:      in: register address bytes; out: read payload (reused buffer)
+ * @addr_type:   number of address bytes to transmit (enum value == byte count)
+ * @data_length: number of bytes to read back
+ *
+ * Issues an address write followed by a repeated-start read into the same
+ * buffer.  Returns the i2c_transfer() result (< 0 on failure).
+ */
+static int32_t cam_qup_i2c_rxdata(
+	struct i2c_client *dev_client, unsigned char *rxdata,
+	enum camera_sensor_i2c_type addr_type,
+	int data_length)
+{
+	int32_t rc = 0;
+	uint16_t saddr = dev_client->addr >> 1;
+	struct i2c_msg msgs[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = addr_type,
+			.buf = rxdata,
+		},
+		{
+			.addr = saddr,
+			.flags = I2C_M_RD,
+			.len = data_length,
+			.buf = rxdata,
+		},
+	};
+	rc = i2c_transfer(dev_client->adapter, msgs, 2);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "failed 0x%x", saddr);
+	return rc;
+}
+
+
+/*
+ * cam_qup_i2c_txdata - raw write over QUP I2C
+ * @dev_client: io master wrapper; ->client->addr holds the 8-bit address
+ * @txdata:     buffer containing address + data bytes, already packed
+ * @length:     total number of bytes to transmit
+ *
+ * Single-message I2C write.  Returns the i2c_transfer() result (< 0 on
+ * failure).
+ */
+static int32_t cam_qup_i2c_txdata(
+	struct camera_io_master *dev_client, unsigned char *txdata,
+	int length)
+{
+	int32_t rc = 0;
+	uint16_t saddr = dev_client->client->addr >> 1;
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		},
+	};
+	rc = i2c_transfer(dev_client->client->adapter, msg, 1);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "failed 0x%x", saddr);
+	return rc;
+}
+
+/*
+ * cam_qup_i2c_read - read a single register value
+ * @client:    I2C client
+ * @addr:      register address
+ * @data:      out: value assembled big-endian from the received bytes
+ * @addr_type: address width (enum value doubles as byte count)
+ * @data_type: data width (enum value doubles as byte count)
+ *
+ * Packs the address big-endian into a scratch buffer, performs the
+ * write/read transaction and reassembles the result.  Returns 0 on
+ * success, negative errno otherwise.
+ */
+int32_t cam_qup_i2c_read(struct i2c_client *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int32_t rc = -EINVAL;
+	unsigned char *buf = NULL;
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr/data_type verfication");
+		return rc;
+	}
+
+	/* buffer holds the address on tx and the payload on rx */
+	buf = kzalloc(addr_type + data_type, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	/* big-endian address packing; final else covers the DWORD case */
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = addr >> 8;
+		buf[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = addr >> 16;
+		buf[1] = addr >> 8;
+		buf[2] = addr;
+	} else {
+		buf[0] = addr >> 24;
+		buf[1] = addr >> 16;
+		buf[2] = addr >> 8;
+		buf[3] = addr;
+	}
+
+	rc = cam_qup_i2c_rxdata(client, buf, addr_type, data_type);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
+		goto read_fail;
+	}
+
+	/* big-endian reassembly of the received bytes */
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+		*data = buf[0];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
+		*data = buf[0] << 8 | buf[1];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+		*data = buf[0] << 16 | buf[1] << 8 | buf[2];
+	else
+		*data = buf[0] << 24 | buf[1] << 16 |
+			buf[2] << 8 | buf[3];
+
+	CAM_DBG(CAM_SENSOR, "addr = 0x%x data: 0x%x", addr, *data);
+read_fail:
+	kfree(buf);
+	buf = NULL;
+	return rc;
+}
+
+/*
+ * cam_qup_i2c_read_seq - sequential read of @num_byte bytes from @addr
+ * @client:    I2C client
+ * @addr:      starting register address
+ * @data:      out: caller buffer, must hold at least @num_byte bytes
+ * @addr_type: address width (enum value doubles as byte count)
+ * @num_byte:  number of bytes to read; bounded by I2C_REG_DATA_MAX
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int32_t cam_qup_i2c_read_seq(struct i2c_client *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte)
+{
+	int32_t rc = -EFAULT;
+	unsigned char *buf = NULL;
+	int i;
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr_type verification");
+		return rc;
+	}
+
+	if ((num_byte == 0) || (num_byte > I2C_REG_DATA_MAX)) {
+		CAM_ERR(CAM_SENSOR, "num_byte:0x%x max supported:0x%x",
+			num_byte, I2C_REG_DATA_MAX);
+		return rc;
+	}
+
+	buf = kzalloc(addr_type + num_byte, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* big-endian address packing; final else covers the DWORD case */
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = addr >> BITS_PER_BYTE;
+		buf[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = addr >> 16;
+		buf[1] = addr >> 8;
+		buf[2] = addr;
+	} else {
+		buf[0] = addr >> 24;
+		buf[1] = addr >> 16;
+		buf[2] = addr >> 8;
+		buf[3] = addr;
+	}
+
+	rc = cam_qup_i2c_rxdata(client, buf, addr_type, num_byte);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
+		goto read_seq_fail;
+	}
+
+	for (i = 0; i < num_byte; i++)
+		data[i] = buf[i];
+
+read_seq_fail:
+	kfree(buf);
+	buf = NULL;
+	return rc;
+}
+
+/*
+ * cam_qup_i2c_compare - read a register and compare against expected value
+ *
+ * Returns I2C_COMPARE_MATCH when (read & ~data_mask) equals @data,
+ * I2C_COMPARE_MISMATCH otherwise, or a negative errno on read failure.
+ *
+ * NOTE(review): the read value is truncated to 16 bits before comparison,
+ * so 3B/DWORD data types only compare the low word — confirm intended.
+ */
+static int32_t cam_qup_i2c_compare(struct i2c_client *client,
+	uint32_t addr, uint32_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type)
+{
+	int32_t rc;
+	uint32_t reg_data = 0;
+
+	rc = cam_qup_i2c_read(client, addr, &reg_data,
+		addr_type, data_type);
+	if (rc < 0)
+		return rc;
+
+	reg_data = reg_data & 0xFFFF;
+	if (data != (reg_data & ~data_mask))
+		return I2C_COMPARE_MISMATCH;
+
+	return I2C_COMPARE_MATCH;
+}
+
+/*
+ * cam_qup_i2c_poll - poll a register until it matches @data or timeout
+ * @delay_ms: total poll budget; one compare per millisecond
+ *
+ * Returns I2C_COMPARE_MATCH on success, I2C_COMPARE_MISMATCH if the value
+ * never matched (non-fatal), or a negative errno on a read failure.
+ */
+int32_t cam_qup_i2c_poll(struct i2c_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t delay_ms)
+{
+	int32_t rc = 0;
+	int i = 0;
+
+	if ((delay_ms > MAX_POLL_DELAY_MS) || (delay_ms == 0)) {
+		CAM_ERR(CAM_SENSOR, "invalid delay = %d max_delay = %d",
+			delay_ms, MAX_POLL_DELAY_MS);
+		return -EINVAL;
+	}
+
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+		return -EINVAL;
+
+	/* one compare attempt per millisecond of the allowed delay */
+	for (i = 0; i < delay_ms; i++) {
+		rc = cam_qup_i2c_compare(client,
+			addr, data, data_mask, data_type, addr_type);
+		if (rc == I2C_COMPARE_MATCH)
+			return rc;
+
+		usleep_range(1000, 1010);
+	}
+	/* If rc is MISMATCH then read is successful but poll is failure */
+	if (rc == I2C_COMPARE_MISMATCH)
+		CAM_ERR(CAM_SENSOR, "poll failed rc=%d(non-fatal)", rc);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "poll failed rc=%d", rc);
+
+	return rc;
+}
+
+/*
+ * cam_qup_i2c_write - write a single reg_addr/reg_data pair
+ *
+ * Packs address then data big-endian into a bounce buffer (max
+ * I2C_REG_MAX_BUF_SIZE = 4 addr + 4 data bytes) and transmits it in one
+ * I2C message.  Returns 0 on success, negative errno otherwise.
+ */
+static int32_t cam_qup_i2c_write(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_array *reg_setting,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int32_t rc = 0;
+	unsigned char *buf = NULL;
+	uint8_t len = 0;
+
+	/* GFP_DMA: buffer is handed to the I2C controller driver */
+	buf = kzalloc(I2C_REG_MAX_BUF_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!buf) {
+		CAM_ERR(CAM_SENSOR, "Buffer memory allocation failed");
+		return -ENOMEM;
+	}
+
+	CAM_DBG(CAM_SENSOR, "reg addr = 0x%x data type: %d",
+			reg_setting->reg_addr, data_type);
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = reg_setting->reg_addr;
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		len = 1;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = reg_setting->reg_addr >> 8;
+		buf[1] = reg_setting->reg_addr;
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len+1, buf[len+1]);
+		len = 2;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = reg_setting->reg_addr >> 16;
+		buf[1] = reg_setting->reg_addr >> 8;
+		buf[2] = reg_setting->reg_addr;
+		len = 3;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[0] = reg_setting->reg_addr >> 24;
+		buf[1] = reg_setting->reg_addr >> 16;
+		buf[2] = reg_setting->reg_addr >> 8;
+		buf[3] = reg_setting->reg_addr;
+		len = 4;
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid I2C addr type");
+		rc = -EINVAL;
+		goto deallocate_buffer;
+	}
+
+	/* data bytes follow the address, also big-endian */
+	CAM_DBG(CAM_SENSOR, "Data: 0x%x", reg_setting->reg_data);
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[len] = reg_setting->reg_data;
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		len += 1;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[len] = reg_setting->reg_data >> 8;
+		buf[len+1] = reg_setting->reg_data;
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+		len += 2;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[len] = reg_setting->reg_data >> 16;
+		buf[len + 1] = reg_setting->reg_data >> 8;
+		buf[len + 2] = reg_setting->reg_data;
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+2, buf[len+2]);
+		len += 3;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[len] = reg_setting->reg_data >> 24;
+		buf[len + 1] = reg_setting->reg_data >> 16;
+		buf[len + 2] = reg_setting->reg_data >> 8;
+		buf[len + 3] = reg_setting->reg_data;
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+2, buf[len+2]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+3, buf[len+3]);
+		len += 4;
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid Data Type");
+		rc = -EINVAL;
+		goto deallocate_buffer;
+	}
+
+	rc = cam_qup_i2c_txdata(client, buf, len);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
+
+deallocate_buffer:
+	kfree(buf);
+	return rc;
+}
+
+/*
+ * cam_qup_i2c_write_table - write each entry of a register setting table
+ *
+ * Writes entries one at a time, stopping at the first failure, then
+ * applies the table-level post-write delay (msleep above 20 ms,
+ * usleep_range otherwise).  Returns 0 on success, negative errno on the
+ * first failed write or invalid arguments.
+ */
+int32_t cam_qup_i2c_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int32_t rc = -EINVAL;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+
+	if (!client || !write_setting)
+		return rc;
+
+	if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)))
+		return rc;
+
+	reg_setting = write_setting->reg_setting;
+
+	for (i = 0; i < write_setting->size; i++) {
+		CAM_DBG(CAM_SENSOR, "addr 0x%x data 0x%x",
+			reg_setting->reg_addr, reg_setting->reg_data);
+
+		rc = cam_qup_i2c_write(client, reg_setting,
+			write_setting->addr_type, write_setting->data_type);
+		if (rc < 0)
+			break;
+		reg_setting++;
+	}
+
+	/* delay is in milliseconds; short delays use the hrtimer-based sleep */
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000, (write_setting->delay
+			* 1000) + 1000);
+
+	return rc;
+}
+
+/*
+ * cam_qup_i2c_write_seq - write table entries at consecutive addresses
+ *
+ * Entry i is written at its reg_addr offset by i, one I2C transaction per
+ * entry.  Returns 0 on success, negative errno on the first failure.
+ *
+ * NOTE(review): reg_addr is modified in place (+= i), so the caller's
+ * table is mutated and re-submitting the same table skews the addresses —
+ * confirm callers always rebuild the table.
+ */
+static int32_t cam_qup_i2c_write_seq(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int32_t rc = 0;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+
+	reg_setting = write_setting->reg_setting;
+
+	for (i = 0; i < write_setting->size; i++) {
+		reg_setting->reg_addr += i;
+		rc = cam_qup_i2c_write(client, reg_setting,
+			write_setting->addr_type, write_setting->data_type);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Sequential i2c write failed: rc: %d", rc);
+			break;
+		}
+		reg_setting++;
+	}
+
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000, (write_setting->delay
+			* 1000) + 1000);
+
+	return rc;
+}
+
+/*
+ * cam_qup_i2c_write_burst - write a whole table in one I2C transaction
+ *
+ * Packs the first entry's address followed by every entry's data bytes
+ * (big-endian) into one buffer sized addr_type + size * data_type, then
+ * transmits it as a single message.  Returns 0 on success, negative errno
+ * otherwise.
+ *
+ * NOTE(review): the length bound is checked only after the buffer has
+ * been filled; with consistent addr/data types len always equals the
+ * allocation so the check is defensive — confirm no mixed-type callers.
+ */
+static int32_t cam_qup_i2c_write_burst(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int32_t rc = 0;
+	uint32_t len = 0;
+	unsigned char *buf = NULL;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+	enum camera_sensor_i2c_type addr_type;
+	enum camera_sensor_i2c_type data_type;
+
+	buf = kzalloc((write_setting->addr_type +
+		(write_setting->size * write_setting->data_type)),
+		GFP_KERNEL);
+
+	if (!buf) {
+		CAM_ERR(CAM_SENSOR, "BUF is NULL");
+		return -ENOMEM;
+	}
+
+	reg_setting = write_setting->reg_setting;
+	addr_type = write_setting->addr_type;
+	data_type = write_setting->data_type;
+
+	/* burst address comes from the first table entry only */
+	CAM_DBG(CAM_SENSOR, "reg addr = 0x%x data type: %d",
+		reg_setting->reg_addr, data_type);
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = reg_setting->reg_addr;
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		len = 1;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = reg_setting->reg_addr >> 8;
+		buf[1] = reg_setting->reg_addr;
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len+1, buf[len+1]);
+		len = 2;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = reg_setting->reg_addr >> 16;
+		buf[1] = reg_setting->reg_addr >> 8;
+		buf[2] = reg_setting->reg_addr;
+		len = 3;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[0] = reg_setting->reg_addr >> 24;
+		buf[1] = reg_setting->reg_addr >> 16;
+		buf[2] = reg_setting->reg_addr >> 8;
+		buf[3] = reg_setting->reg_addr;
+		len = 4;
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid I2C addr type");
+		rc = -EINVAL;
+		goto free_res;
+	}
+
+	/* append every entry's data, big-endian, back to back */
+	for (i = 0; i < write_setting->size; i++) {
+		if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+			buf[len] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			len += 1;
+		} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+			buf[len] = reg_setting->reg_data >> 8;
+			buf[len+1] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+1, buf[len+1]);
+			len += 2;
+		} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+			buf[len] = reg_setting->reg_data >> 16;
+			buf[len + 1] = reg_setting->reg_data >> 8;
+			buf[len + 2] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+1, buf[len+1]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+2, buf[len+2]);
+			len += 3;
+		} else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+			buf[len] = reg_setting->reg_data >> 24;
+			buf[len + 1] = reg_setting->reg_data >> 16;
+			buf[len + 2] = reg_setting->reg_data >> 8;
+			buf[len + 3] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+1, buf[len+1]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+2, buf[len+2]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+3, buf[len+3]);
+			len += 4;
+		} else {
+			CAM_ERR(CAM_SENSOR, "Invalid Data Type");
+			rc = -EINVAL;
+			goto free_res;
+		}
+		reg_setting++;
+	}
+
+	if (len > (write_setting->addr_type +
+		(write_setting->size * write_setting->data_type))) {
+		CAM_ERR(CAM_SENSOR, "Invalid Length: %u | Expected length: %u",
+			len, (write_setting->addr_type +
+			(write_setting->size * write_setting->data_type)));
+		rc = -EINVAL;
+		goto free_res;
+	}
+
+	rc = cam_qup_i2c_txdata(client, buf, len);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
+
+free_res:
+	kfree(buf);
+	return rc;
+}
+
+/*
+ * cam_qup_i2c_write_continuous_table - dispatch burst vs sequential write
+ * @cam_sensor_i2c_write_flag: CAM_SENSOR_I2C_WRITE_BURST or _SEQ; any
+ *                             other value is silently a no-op (rc = 0)
+ *
+ * Returns 0 on success or negative errno on invalid args / write failure.
+ */
+int32_t cam_qup_i2c_write_continuous_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_settings,
+	uint8_t cam_sensor_i2c_write_flag)
+{
+	int32_t rc = 0;
+
+	if (!client || !write_settings)
+		return -EINVAL;
+
+	if ((write_settings->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_settings->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| (write_settings->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_settings->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)))
+		return -EINVAL;
+
+	if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_BURST)
+		rc = cam_qup_i2c_write_burst(client, write_settings);
+	else if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_SEQ)
+		rc = cam_qup_i2c_write_seq(client, write_settings);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
new file mode 100644
index 000000000000..f699cc2096ba
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
@@ -0,0 +1,617 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_spi.h"
+#include "cam_debug_util.h"
+
+/*
+ * cam_spi_txfr - single full-duplex SPI transfer
+ * @txbuf/@rxbuf: transmit / receive buffers (@rxbuf may be NULL)
+ * @num_byte:     transfer length in bytes
+ *
+ * Returns the spi_sync() result (0 on success).
+ */
+static int cam_spi_txfr(struct spi_device *spi, char *txbuf,
+	char *rxbuf, int num_byte)
+{
+	struct spi_transfer txfr;
+	struct spi_message msg;
+
+	memset(&txfr, 0, sizeof(txfr));
+	txfr.tx_buf = txbuf;
+	txfr.rx_buf = rxbuf;
+	txfr.len = num_byte;
+	spi_message_init(&msg);
+	spi_message_add_tail(&txfr, &msg);
+
+	return spi_sync(spi, &msg);
+}
+
+/*
+ * cam_spi_txfr_read - half-duplex SPI write-then-read
+ *
+ * Queues two transfers in one message: @txlen bytes out of @txbuf, then
+ * @rxlen bytes into @rxbuf.  Returns the spi_sync() result (0 on success).
+ */
+static int cam_spi_txfr_read(struct spi_device *spi, char *txbuf,
+	char *rxbuf, int txlen, int rxlen)
+{
+	struct spi_transfer tx;
+	struct spi_transfer rx;
+	struct spi_message m;
+
+	memset(&tx, 0, sizeof(tx));
+	memset(&rx, 0, sizeof(rx));
+	tx.tx_buf = txbuf;
+	rx.rx_buf = rxbuf;
+	tx.len = txlen;
+	rx.len = rxlen;
+	spi_message_init(&m);
+	spi_message_add_tail(&tx, &m);
+	spi_message_add_tail(&rx, &m);
+	return spi_sync(spi, &m);
+}
+
+/**
+ * cam_set_addr() - helper function to set transfer address
+ * @addr: device address
+ * @addr_len: the addr field length of an instruction; 0 means no address
+ *            bytes are emitted at all
+ * @addr_type: type (i.e. byte-length) of @addr; the final else branch
+ *             handles the DWORD (4-byte) case
+ * @str: shifted address output, must be zeroed when passed in
+ *
+ * This helper function sets @str based on the addr field length of an
+ * instruction and the data length.  Bytes are packed big-endian.
+ */
+static void cam_set_addr(uint32_t addr, uint8_t addr_len,
+	enum camera_sensor_i2c_type addr_type,
+	char *str)
+{
+	if (!addr_len)
+		return;
+
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		str[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		str[0] = addr >> 8;
+		str[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		str[0] = addr >> 16;
+		str[1] = addr >> 8;
+		str[2] = addr;
+	} else {
+		str[0] = addr >> 24;
+		str[1] = addr >> 16;
+		str[2] = addr >> 8;
+		str[3] = addr;
+	}
+}
+
+/**
+ * cam_spi_tx_helper() - wrapper for SPI transaction
+ * @client: io client
+ * @inst: inst of this transaction
+ * @addr: device addr following the inst
+ * @data: output byte array (could be NULL)
+ * @num_byte: size of @data
+ * @tx, rx: optional transfer buffer. It must be at least header
+ * + @num_byte long.
+ *
+ * This is the core function for SPI transaction, except for writes. It first
+ * checks address type, then allocates required memory for tx/rx buffers.
+ * It sends out <opcode><addr>, and optionally receives @num_byte of response,
+ * if @data is not NULL. This function does not check for wait conditions,
+ * and will return immediately once bus transaction finishes.
+ *
+ * This function will allocate buffers of header + @num_byte long. For
+ * large transfers, the allocation could fail. External buffer @tx, @rx
+ * should be passed in to bypass allocation. The size of buffer should be
+ * at least header + num_byte long. Since buffer is managed externally,
+ * @data will be ignored, and read results will be in @rx.
+ * @tx, @rx also can be used for repeated transfers to improve performance.
+ */
+static int32_t cam_spi_tx_helper(struct camera_io_master *client,
+	struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte, char *tx, char *rx)
+{
+	int32_t rc = -EINVAL;
+	struct spi_device *spi = client->spi_client->spi_master;
+	struct device *dev = NULL;
+	char *ctx = NULL, *crx = NULL;
+	uint32_t len, hlen;
+	uint8_t retries = client->spi_client->retries;
+	uint32_t txr = 0, rxr = 0;
+	struct page *page_tx = NULL, *page_rx = NULL;
+
+	/* validate the master before touching it; checking &spi->dev for
+	 * NULL after taking the member address can never fail
+	 */
+	if (!spi) {
+		CAM_ERR(CAM_SENSOR, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	hlen = cam_camera_spi_get_hlen(inst);
+	len = hlen + num_byte;
+	dev = &(spi->dev);
+
+	if (tx) {
+		ctx = tx;
+	} else {
+		/* CMA pages: transfer buffers must be DMA-able */
+		txr = PAGE_ALIGN(len) >> PAGE_SHIFT;
+		page_tx = cma_alloc(dev_get_cma_area(dev),
+			txr, 0);
+		if (!page_tx)
+			return -ENOMEM;
+
+		ctx = page_address(page_tx);
+	}
+
+	if (num_byte) {
+		if (rx) {
+			crx = rx;
+		} else {
+			rxr = PAGE_ALIGN(len) >> PAGE_SHIFT;
+			page_rx = cma_alloc(dev_get_cma_area(dev),
+				rxr, 0);
+			if (!page_rx) {
+				if (!tx)
+					cma_release(dev_get_cma_area(dev),
+						page_tx, txr);
+
+				return -ENOMEM;
+			}
+			crx = page_address(page_rx);
+		}
+	} else {
+		crx = NULL;
+	}
+
+	ctx[0] = inst->opcode;
+	cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1);
+	while ((rc = cam_spi_txfr(spi, ctx, crx, len)) && retries) {
+		retries--;
+		msleep(client->spi_client->retry_delay);
+	}
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed: spi txfr rc %d", rc);
+		goto out;
+	}
+	if (data && num_byte && !rx)
+		memcpy(data, crx + hlen, num_byte);
+
+out:
+	if (!tx)
+		cma_release(dev_get_cma_area(dev), page_tx, txr);
+	if (!rx)
+		cma_release(dev_get_cma_area(dev), page_rx, rxr);
+	return rc;
+}
+
+/*
+ * cam_spi_tx_read - SPI write-then-read helper for register reads
+ *
+ * Like cam_spi_tx_helper() but uses a write-then-read message pair and
+ * kzalloc'd bounce buffers.  @tx/@rx, when non-NULL, are caller-managed
+ * buffers that bypass allocation; read results then land in @rx instead
+ * of @data.  Retries the transfer up to the client's retry count.
+ */
+static int32_t cam_spi_tx_read(struct camera_io_master *client,
+	struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte, char *tx, char *rx)
+{
+	int32_t rc = -EINVAL;
+	struct spi_device *spi = client->spi_client->spi_master;
+	char *ctx = NULL, *crx = NULL;
+	uint32_t hlen;
+	uint8_t retries = client->spi_client->retries;
+
+	hlen = cam_camera_spi_get_hlen(inst);
+	if (tx) {
+		ctx = tx;
+	} else {
+		ctx = kzalloc(hlen, GFP_KERNEL | GFP_DMA);
+		if (!ctx)
+			return -ENOMEM;
+	}
+	if (num_byte) {
+		if (rx) {
+			crx = rx;
+		} else {
+			crx = kzalloc(num_byte, GFP_KERNEL | GFP_DMA);
+			if (!crx) {
+				if (!tx)
+					kfree(ctx);
+				return -ENOMEM;
+			}
+		}
+	} else {
+		crx = NULL;
+	}
+
+	ctx[0] = inst->opcode;
+	cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1);
+
+	CAM_DBG(CAM_EEPROM, "tx(%u): %02x %02x %02x %02x", hlen, ctx[0],
+		ctx[1], ctx[2], ctx[3]);
+	while ((rc = cam_spi_txfr_read(spi, ctx, crx, hlen, num_byte))
+			&& retries) {
+		retries--;
+		msleep(client->spi_client->retry_delay);
+	}
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed %d", rc);
+		goto out;
+	}
+	if (data && num_byte && !rx)
+		memcpy(data, crx, num_byte);
+out:
+	if (!tx)
+		kfree(ctx);
+	if (!rx)
+		kfree(crx);
+	return rc;
+}
+
+/*
+ * cam_spi_read - read a single register value over SPI
+ *
+ * Uses the client's "read" instruction; @data_type is passed as the byte
+ * count (the enum values equal their byte widths).  The result is
+ * reassembled big-endian into *@data.  Returns 0 on success, negative
+ * errno otherwise.
+ */
+int cam_spi_read(struct camera_io_master *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int rc = -EINVAL;
+	uint8_t temp[CAMERA_SENSOR_I2C_TYPE_MAX];
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr/data_type verification");
+		return rc;
+	}
+
+	rc = cam_spi_tx_read(client,
+		&client->spi_client->cmd_tbl.read, addr, &temp[0],
+		addr_type, data_type, NULL, NULL);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed %d", rc);
+		return rc;
+	}
+
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+		*data = temp[0];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
+		*data = (temp[0] << BITS_PER_BYTE) | temp[1];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+		*data = (temp[0] << 16 | temp[1] << 8 | temp[2]);
+	else
+		*data = (temp[0] << 24 | temp[1] << 16 | temp[2] << 8 |
+			temp[3]);
+
+	CAM_DBG(CAM_SENSOR, "addr 0x%x, data %u", addr, *data);
+	return rc;
+}
+
+/*
+ * cam_spi_read_seq - sequential read of @num_bytes from @addr over SPI
+ *
+ * Validates the address type and a non-zero length, then delegates to
+ * cam_spi_tx_helper() with the "read_seq" instruction.  Returns 0 on
+ * success, negative errno otherwise.
+ */
+int32_t cam_spi_read_seq(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type, int32_t num_bytes)
+{
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr_type verification");
+		return -EINVAL;
+	}
+
+	if (num_bytes == 0) {
+		CAM_ERR(CAM_SENSOR, "num_byte: 0x%x", num_bytes);
+		return -EINVAL;
+	}
+
+	return cam_spi_tx_helper(client,
+		&client->spi_client->cmd_tbl.read_seq, addr, data,
+		addr_type, num_bytes, NULL, NULL);
+}
+
+/*
+ * cam_spi_query_id - read the device ID via the "query_id" instruction
+ *
+ * Thin wrapper over cam_spi_tx_helper(); @data receives @num_byte bytes.
+ */
+int cam_spi_query_id(struct camera_io_master *client,
+	uint32_t addr, enum camera_sensor_i2c_type addr_type,
+	uint8_t *data, uint32_t num_byte)
+{
+	return cam_spi_tx_helper(client,
+		&client->spi_client->cmd_tbl.query_id,
+		addr, data, addr_type, num_byte, NULL, NULL);
+}
+
+/*
+ * cam_spi_read_status_reg - read the one-byte device status register
+ *
+ * Only supports a zero-length address field (the read_status instruction
+ * consists of the opcode alone); returns -ENXIO otherwise.
+ */
+static int32_t cam_spi_read_status_reg(
+	struct camera_io_master *client, uint8_t *status,
+	enum camera_sensor_i2c_type addr_type)
+{
+	struct cam_camera_spi_inst *rs =
+		&client->spi_client->cmd_tbl.read_status;
+
+	if (rs->addr_len != 0) {
+		CAM_ERR(CAM_SENSOR, "not implemented yet");
+		return -ENXIO;
+	}
+	return cam_spi_tx_helper(client, rs, 0, status,
+		addr_type, 1, NULL, NULL);
+}
+
+/*
+ * cam_spi_device_busy - test the busy bit(s) in the status register
+ * @busy: out: non-zero iff (status & busy_mask) is set
+ *
+ * Returns 0 on success (busy state in *@busy), negative errno on a
+ * failed status read.
+ */
+static int32_t cam_spi_device_busy(struct camera_io_master *client,
+	uint8_t *busy, enum camera_sensor_i2c_type addr_type)
+{
+	int rc;
+	uint8_t st = 0;
+
+	rc = cam_spi_read_status_reg(client, &st, addr_type);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed to read status reg");
+		return rc;
+	}
+	*busy = st & client->spi_client->busy_mask;
+	return 0;
+}
+
+/*
+ * cam_spi_wait - poll the device busy flag after issuing @inst
+ *
+ * Polls up to inst->delay_count times, sleeping inst->delay_intv ms
+ * between attempts.  Returns 0 once the device reports not-busy,
+ * -ETIMEDOUT if it is still busy after all attempts, or a negative errno
+ * from the status read.
+ */
+static int32_t cam_spi_wait(struct camera_io_master *client,
+	struct cam_camera_spi_inst *inst,
+	enum camera_sensor_i2c_type addr_type)
+{
+	uint8_t busy;
+	int i, rc;
+
+	CAM_DBG(CAM_SENSOR, "op 0x%x wait start", inst->opcode);
+	for (i = 0; i < inst->delay_count; i++) {
+		rc = cam_spi_device_busy(client, &busy, addr_type);
+		if (rc < 0)
+			return rc;
+		if (!busy)
+			break;
+		msleep(inst->delay_intv);
+		CAM_DBG(CAM_SENSOR, "op 0x%x wait", inst->opcode);
+	}
+	/*
+	 * The loop exits with i == delay_count when every poll saw the
+	 * device busy; the original "i > delay_count" test could never
+	 * fire, silently reporting success on timeout.
+	 */
+	if (i >= inst->delay_count) {
+		CAM_ERR(CAM_SENSOR, "op %x timed out", inst->opcode);
+		return -ETIMEDOUT;
+	}
+	CAM_DBG(CAM_SENSOR, "op %x finished", inst->opcode);
+	return 0;
+}
+
+/*
+ * cam_spi_write_enable - issue the write-enable instruction, if any
+ *
+ * A zero opcode means the device needs no write-enable and 0 is returned
+ * immediately.  Only an address-less instruction is supported.
+ */
+static int32_t cam_spi_write_enable(struct camera_io_master *client,
+	enum camera_sensor_i2c_type addr_type)
+{
+	struct cam_camera_spi_inst *we =
+		&client->spi_client->cmd_tbl.write_enable;
+	int rc;
+
+	if (we->opcode == 0)
+		return 0;
+	if (we->addr_len != 0) {
+		CAM_ERR(CAM_SENSOR, "not implemented yet");
+		return -EINVAL;
+	}
+	rc = cam_spi_tx_helper(client, we, 0, NULL, addr_type,
+		0, NULL, NULL);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "write enable failed");
+	return rc;
+}
+
+/**
+ * cam_spi_page_program() - core function to perform write
+ * @client: need for obtaining SPI device
+ * @addr: address to program on device
+ * @data: data to write
+ * @len: size of data
+ * @tx: tx buffer, size >= header + len
+ *
+ * This function performs SPI write, and has no boundary check. Writing range
+ * should not cross page boundary, or data will be corrupted. Transaction is
+ * guaranteed to be finished when it returns. This function should never be
+ * used outside cam_spi_write_seq().
+ */
+static int32_t cam_spi_page_program(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint16_t len, uint8_t *tx)
+{
+	int rc;
+	struct cam_camera_spi_inst *pg =
+		&client->spi_client->cmd_tbl.page_program;
+	struct spi_device *spi = client->spi_client->spi_master;
+	uint8_t retries = client->spi_client->retries;
+	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+
+	CAM_DBG(CAM_SENSOR, "addr 0x%x, size 0x%x", addr, len);
+	rc = cam_spi_write_enable(client, addr_type);
+	if (rc < 0)
+		return rc;
+	memset(tx, 0, header_len);
+	tx[0] = pg->opcode;
+	cam_set_addr(addr, pg->addr_len, addr_type, tx + 1);
+	memcpy(tx + header_len, data, len);
+	CAM_DBG(CAM_SENSOR, "tx(%u): %02x %02x %02x %02x",
+		len, tx[0], tx[1], tx[2], tx[3]);
+	/*
+	 * Retry loop: on a failed spi_write, wait for the device to go
+	 * idle before retrying.  NOTE(review): cam_spi_wait()'s rc inside
+	 * the loop is overwritten by the next spi_write — confirm intended.
+	 */
+	while ((rc = spi_write(spi, tx, len + header_len)) && retries) {
+		rc = cam_spi_wait(client, pg, addr_type);
+		msleep(client->spi_client->retry_delay);
+		retries--;
+	}
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed %d", rc);
+		return rc;
+	}
+	/* final wait guarantees the program cycle has completed */
+	rc = cam_spi_wait(client, pg, addr_type);
+	return rc;
+}
+
+/*
+ * cam_spi_write - write a single register value over SPI
+ *
+ * Packs @data big-endian into a stack buffer sized for the widest data
+ * type and delegates to cam_spi_page_program() (data_type doubles as the
+ * byte count).  Returns 0 on success, negative errno otherwise.
+ */
+int cam_spi_write(struct camera_io_master *client,
+	uint32_t addr, uint32_t data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	struct cam_camera_spi_inst *pg =
+		&client->spi_client->cmd_tbl.page_program;
+	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+	uint16_t len = 0;
+	char buf[CAMERA_SENSOR_I2C_TYPE_MAX];
+	char *tx;
+	int rc = -EINVAL;
+
+	/*
+	 * Bug fix: the data_type check previously read
+	 * "data_type != CAMERA_SENSOR_I2C_TYPE_MAX", which is true for
+	 * every VALID data type, so the function always bailed out with
+	 * -EINVAL.  It must mirror the addr_type range check.
+	 */
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+		return rc;
+
+	CAM_DBG(CAM_EEPROM, "Data: 0x%x", data);
+	len = header_len + (uint8_t)data_type;
+	tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+	if (!tx)
+		goto NOMEM;
+
+	/* big-endian data packing; final else covers the DWORD case */
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = data;
+		CAM_DBG(CAM_EEPROM, "Byte %d: 0x%x", len, buf[0]);
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = (data >> BITS_PER_BYTE) & 0x00FF;
+		buf[1] = (data & 0x00FF);
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = (data >> 16) & 0x00FF;
+		buf[1] = (data >> 8) & 0x00FF;
+		buf[2] = (data & 0x00FF);
+	} else {
+		buf[0] = (data >> 24) & 0x00FF;
+		buf[1] = (data >> 16) & 0x00FF;
+		buf[2] = (data >> 8) & 0x00FF;
+		buf[3] = (data & 0x00FF);
+	}
+
+	rc = cam_spi_page_program(client, addr, buf,
+		addr_type, data_type, tx);
+	if (rc < 0)
+		goto ERROR;
+	goto OUT;
+NOMEM:
+	CAM_ERR(CAM_SENSOR, "memory allocation failed");
+	return -ENOMEM;
+ERROR:
+	CAM_ERR(CAM_SENSOR, "error write");
+OUT:
+	kfree(tx);
+	return rc;
+}
+
+/*
+ * cam_spi_write_seq - write @num_byte bytes starting at @addr
+ *
+ * Splits the write along device page boundaries: a write that fits in
+ * one page goes out directly; otherwise the data is chunked so that no
+ * single cam_spi_page_program() call crosses a page.  Returns 0 on
+ * success, negative errno otherwise.
+ */
+int32_t cam_spi_write_seq(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type, uint32_t num_byte)
+{
+	struct cam_camera_spi_inst *pg =
+		&client->spi_client->cmd_tbl.page_program;
+	const uint32_t page_size = client->spi_client->page_size;
+	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+	uint16_t len;
+	uint32_t cur_len, end;
+	char *tx, *pdata = data;
+	int rc = -EINVAL;
+
+	if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) ||
+		(addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID))
+		return rc;
+	/* single page write */
+	if ((addr % page_size) + num_byte <= page_size) {
+		len = header_len + num_byte;
+		tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+		if (!tx)
+			goto NOMEM;
+		rc = cam_spi_page_program(client, addr, data, addr_type,
+			num_byte, tx);
+		if (rc < 0)
+			goto ERROR;
+		goto OUT;
+	}
+	/* multi page write */
+	len = header_len + page_size;
+	tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+	if (!tx)
+		goto NOMEM;
+	while (num_byte) {
+		/* cur_len = bytes left in the current page, capped at num_byte */
+		end = min(page_size, (addr % page_size) + num_byte);
+		cur_len = end - (addr % page_size);
+		CAM_ERR(CAM_SENSOR, "Addr: 0x%x curr_len: 0x%x pgSize: %d",
+			addr, cur_len, page_size);
+		rc = cam_spi_page_program(client, addr, pdata, addr_type,
+			cur_len, tx);
+		if (rc < 0)
+			goto ERROR;
+		addr += cur_len;
+		pdata += cur_len;
+		num_byte -= cur_len;
+	}
+	goto OUT;
+NOMEM:
+	pr_err("%s: memory allocation failed\n", __func__);
+	return -ENOMEM;
+ERROR:
+	pr_err("%s: error write\n", __func__);
+OUT:
+	kfree(tx);
+	return rc;
+}
+
+/*
+ * cam_spi_write_table - write each entry of a register setting table
+ *
+ * One cam_spi_write() per entry, stopping at the first failure, then the
+ * table-level post-write delay.  Returns 0 on success, negative errno
+ * otherwise.
+ *
+ * NOTE(review): client_addr_type/addr_type are saved and restored but
+ * never otherwise used — dead locals, candidates for removal.
+ */
+int cam_spi_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int rc = -EFAULT;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+	uint16_t client_addr_type;
+	enum camera_sensor_i2c_type addr_type;
+
+	if (!client || !write_setting)
+		return rc;
+
+	if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+		return rc;
+
+	reg_setting = write_setting->reg_setting;
+	client_addr_type = write_setting->addr_type;
+	addr_type = write_setting->addr_type;
+	for (i = 0; i < write_setting->size; i++) {
+		CAM_DBG(CAM_SENSOR, "addr %x data %x",
+			reg_setting->reg_addr, reg_setting->reg_data);
+		rc = cam_spi_write(client,
+			reg_setting->reg_addr, reg_setting->reg_data,
+			write_setting->addr_type, write_setting->data_type);
+		if (rc < 0)
+			break;
+		reg_setting++;
+	}
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000,
+			(write_setting->delay
+			* 1000) + 1000);
+	addr_type = client_addr_type;
+	return rc;
+}
+
+/*
+ * cam_spi_erase - erase the blocks covering [@addr, @addr + @size)
+ *
+ * Walks the range in erase_size steps starting from the aligned block
+ * containing @addr; each step issues write-enable, the erase instruction,
+ * then waits for completion.  Returns 0 on success, negative errno on the
+ * first failed step.
+ */
+int cam_spi_erase(struct camera_io_master *client, uint32_t addr,
+	enum camera_sensor_i2c_type addr_type, uint32_t size)
+{
+	struct cam_camera_spi_inst *se = &client->spi_client->cmd_tbl.erase;
+	int rc = 0;
+	uint32_t cur;
+	/* fix: 'end' was initialized here and then redundantly recomputed */
+	uint32_t end = addr + size;
+	uint32_t erase_size = client->spi_client->erase_size;
+
+	for (cur = rounddown(addr, erase_size); cur < end; cur += erase_size) {
+		CAM_ERR(CAM_SENSOR, "%s: erasing 0x%x size: %d\n",
+			__func__, cur, erase_size);
+		rc = cam_spi_write_enable(client, addr_type);
+		if (rc < 0)
+			return rc;
+		rc = cam_spi_tx_helper(client, se, cur, NULL, addr_type, 0,
+			NULL, NULL);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "%s: erase failed\n", __func__);
+			return rc;
+		}
+		rc = cam_spi_wait(client, se, addr_type);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "%s: erase timedout\n", __func__);
+			return rc;
+		}
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
new file mode 100644
index 000000000000..ae876194b0fe
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
@@ -0,0 +1,111 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_SPI_H_
+#define _CAM_SENSOR_SPI_H_
+
+#include <linux/spi/spi.h>
+#include <linux/cma.h>
+#include <linux/dma-contiguous.h>
+#include <media/cam_sensor.h>
+#include "cam_sensor_i2c.h"
+
+#define MAX_SPI_SIZE 110
+#define SPI_DYNAMIC_ALLOC
+
+/* One SPI instruction: opcode plus address/dummy byte counts and the
+ * polling cadence used while waiting for the device to go ready.
+ */
+struct cam_camera_spi_inst {
+	uint8_t opcode;
+	uint8_t addr_len;
+	uint8_t dummy_len;
+	uint8_t delay_intv;
+	uint8_t delay_count;
+};
+
+/* One 16-bit data word of a burst write, split into bytes. */
+struct cam_spi_write_burst_data {
+	u8 data_msb;
+	u8 data_lsb;
+};
+
+/* Wire layout of a burst-write packet: command byte, 16-bit address,
+ * then up to MAX_SPI_SIZE data words.
+ */
+struct cam_spi_write_burst_packet {
+	u8 cmd;
+	u8 addr_msb;
+	u8 addr_lsb;
+	struct cam_spi_write_burst_data data_arr[MAX_SPI_SIZE];
+};
+
+/* Bookkeeping for splitting a large burst into chunk_size pieces. */
+struct cam_camera_burst_info {
+	uint32_t burst_addr;
+	uint32_t burst_start;
+	uint32_t burst_len;
+	uint32_t chunk_size;
+};
+
+/* Instruction set of the attached SPI device, one entry per command. */
+struct cam_camera_spi_inst_tbl {
+	struct cam_camera_spi_inst read;
+	struct cam_camera_spi_inst read_seq;
+	struct cam_camera_spi_inst query_id;
+	struct cam_camera_spi_inst page_program;
+	struct cam_camera_spi_inst write_enable;
+	struct cam_camera_spi_inst read_status;
+	struct cam_camera_spi_inst erase;
+};
+
+/* Per-device SPI client state: master handle, command table, expected
+ * device/manufacturer ID bytes, retry and busy-poll parameters, and
+ * device geometry (page_size / erase_size).
+ */
+struct cam_sensor_spi_client {
+	struct spi_device *spi_master;
+	struct cam_camera_spi_inst_tbl cmd_tbl;
+	uint8_t device_id0;
+	uint8_t device_id1;
+	uint8_t mfr_id0;
+	uint8_t mfr_id1;
+	uint8_t retry_delay;
+	uint8_t retries;
+	uint8_t busy_mask;
+	uint16_t page_size;
+	uint32_t erase_size;
+};
+/* Header length in bytes of one SPI instruction: the 1-byte opcode
+ * plus the configured address and dummy byte counts.
+ */
+static __always_inline
+uint16_t cam_camera_spi_get_hlen(struct cam_camera_spi_inst *inst)
+{
+	return sizeof(inst->opcode) + inst->addr_len + inst->dummy_len;
+}
+
+int cam_spi_read(struct camera_io_master *client,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type);
+
+int cam_spi_read_seq(struct camera_io_master *client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ int32_t num_bytes);
+
+int cam_spi_query_id(struct camera_io_master *client,
+ uint32_t addr,
+ enum camera_sensor_i2c_type addr_type,
+ uint8_t *data, uint32_t num_byte);
+
+int cam_spi_write(struct camera_io_master *client,
+ uint32_t addr, uint32_t data,
+ enum camera_sensor_i2c_type addr_type,
+ enum camera_sensor_i2c_type data_type);
+
+int cam_spi_write_table(struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting);
+
+int cam_spi_erase(struct camera_io_master *client,
+ uint32_t addr, enum camera_sensor_i2c_type addr_type,
+ uint32_t size);
+
+int cam_spi_write_seq(struct camera_io_master *client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type, uint32_t num_byte);
+#endif
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/Makefile b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/Makefile
new file mode 100644
index 000000000000..52748108df01
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_sensor_module/cam_res_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_util.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
new file mode 100644
index 000000000000..1e9ec3db054a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -0,0 +1,390 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_CMN_HEADER_
+#define _CAM_SENSOR_CMN_HEADER_
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <media/cam_sensor.h>
+#include <media/cam_req_mgr.h>
+
+#define MAX_REGULATOR 5
+#define MAX_POWER_CONFIG 12
+
+#define MAX_PER_FRAME_ARRAY 32
+#define BATCH_SIZE_MAX 16
+
+#define CAM_SENSOR_NAME "cam-sensor"
+#define CAM_ACTUATOR_NAME "cam-actuator"
+#define CAM_CSIPHY_NAME "cam-csiphy"
+#define CAM_FLASH_NAME "cam-flash"
+#define CAM_EEPROM_NAME "cam-eeprom"
+#define CAM_OIS_NAME "cam-ois"
+
+#define MAX_SYSTEM_PIPELINE_DELAY 2
+
+#define CAM_PKT_NOP_OPCODE 127
+
+enum camera_sensor_cmd_type {
+ CAMERA_SENSOR_CMD_TYPE_INVALID,
+ CAMERA_SENSOR_CMD_TYPE_PROBE,
+ CAMERA_SENSOR_CMD_TYPE_PWR_UP,
+ CAMERA_SENSOR_CMD_TYPE_PWR_DOWN,
+ CAMERA_SENSOR_CMD_TYPE_I2C_INFO,
+ CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR,
+ CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_RD,
+ CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR,
+ CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD,
+ CAMERA_SENSOR_CMD_TYPE_WAIT,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_RER,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET,
+ CAMERA_SENSOR_CMD_TYPE_RD_DATA,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE,
+ CAMERA_SENSOR_CMD_TYPE_MAX,
+};
+
+enum camera_sensor_i2c_op_code {
+ CAMERA_SENSOR_I2C_OP_INVALID,
+ CAMERA_SENSOR_I2C_OP_RNDM_WR,
+ CAMERA_SENSOR_I2C_OP_RNDM_WR_VERF,
+ CAMERA_SENSOR_I2C_OP_CONT_WR_BRST,
+ CAMERA_SENSOR_I2C_OP_CONT_WR_BRST_VERF,
+ CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN,
+ CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN_VERF,
+ CAMERA_SENSOR_I2C_OP_MAX,
+};
+
+enum camera_sensor_wait_op_code {
+ CAMERA_SENSOR_WAIT_OP_INVALID,
+ CAMERA_SENSOR_WAIT_OP_COND,
+ CAMERA_SENSOR_WAIT_OP_HW_UCND,
+ CAMERA_SENSOR_WAIT_OP_SW_UCND,
+ CAMERA_SENSOR_WAIT_OP_MAX,
+};
+
+enum camera_flash_opcode {
+ CAMERA_SENSOR_FLASH_OP_INVALID,
+ CAMERA_SENSOR_FLASH_OP_OFF,
+ CAMERA_SENSOR_FLASH_OP_FIRELOW,
+ CAMERA_SENSOR_FLASH_OP_FIREHIGH,
+ CAMERA_SENSOR_FLASH_OP_MAX,
+};
+
+/* Width of an address or data item on the wire, in bytes
+ * (BYTE = 1 ... DWORD = 4).  INVALID/MAX bound range checks.
+ */
+enum camera_sensor_i2c_type {
+	CAMERA_SENSOR_I2C_TYPE_INVALID,
+	CAMERA_SENSOR_I2C_TYPE_BYTE,
+	CAMERA_SENSOR_I2C_TYPE_WORD,
+	CAMERA_SENSOR_I2C_TYPE_3B,
+	CAMERA_SENSOR_I2C_TYPE_DWORD,
+	CAMERA_SENSOR_I2C_TYPE_MAX,
+};
+
+enum i2c_freq_mode {
+ I2C_STANDARD_MODE,
+ I2C_FAST_MODE,
+ I2C_CUSTOM_MODE,
+ I2C_FAST_PLUS_MODE,
+ I2C_MAX_MODES,
+};
+
+enum position_roll {
+ ROLL_0 = 0,
+ ROLL_90 = 90,
+ ROLL_180 = 180,
+ ROLL_270 = 270,
+ ROLL_INVALID = 360,
+};
+
+enum position_yaw {
+ FRONT_CAMERA_YAW = 0,
+ REAR_CAMERA_YAW = 180,
+ INVALID_YAW = 360,
+};
+
+enum position_pitch {
+ LEVEL_PITCH = 0,
+ INVALID_PITCH = 360,
+};
+
+enum sensor_sub_module {
+ SUB_MODULE_SENSOR,
+ SUB_MODULE_ACTUATOR,
+ SUB_MODULE_EEPROM,
+ SUB_MODULE_LED_FLASH,
+ SUB_MODULE_CSID,
+ SUB_MODULE_CSIPHY,
+ SUB_MODULE_OIS,
+ SUB_MODULE_EXT,
+ SUB_MODULE_MAX,
+};
+
+enum msm_camera_power_seq_type {
+ SENSOR_MCLK,
+ SENSOR_VANA,
+ SENSOR_VDIG,
+ SENSOR_VIO,
+ SENSOR_VAF,
+ SENSOR_VAF_PWDM,
+ SENSOR_CUSTOM_REG1,
+ SENSOR_CUSTOM_REG2,
+ SENSOR_RESET,
+ SENSOR_STANDBY,
+ SENSOR_CUSTOM_GPIO1,
+ SENSOR_CUSTOM_GPIO2,
+ SENSOR_SEQ_TYPE_MAX,
+};
+
+enum cam_sensor_packet_opcodes {
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP = 127
+};
+
+enum cam_actuator_packet_opcodes {
+ CAM_ACTUATOR_PACKET_OPCODE_INIT,
+ CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS,
+ CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS
+};
+
+enum cam_eeprom_packet_opcodes {
+ CAM_EEPROM_PACKET_OPCODE_INIT
+};
+
+enum cam_ois_packet_opcodes {
+ CAM_OIS_PACKET_OPCODE_INIT,
+ CAM_OIS_PACKET_OPCODE_OIS_CONTROL
+};
+
+enum msm_bus_perf_setting {
+ S_INIT,
+ S_PREVIEW,
+ S_VIDEO,
+ S_CAPTURE,
+ S_ZSL,
+ S_STEREO_VIDEO,
+ S_STEREO_CAPTURE,
+ S_DEFAULT,
+ S_LIVESHOT,
+ S_DUAL,
+ S_EXIT
+};
+
+enum msm_camera_device_type_t {
+ MSM_CAMERA_I2C_DEVICE,
+ MSM_CAMERA_PLATFORM_DEVICE,
+ MSM_CAMERA_SPI_DEVICE,
+};
+
+enum cam_flash_device_type {
+ CAMERA_FLASH_DEVICE_TYPE_PMIC = 0,
+ CAMERA_FLASH_DEVICE_TYPE_I2C,
+ CAMERA_FLASH_DEVICE_TYPE_GPIO,
+};
+
+enum cci_i2c_master_t {
+ MASTER_0,
+ MASTER_1,
+ MASTER_MAX,
+};
+
+enum cci_device_num {
+ CCI_DEVICE_0,
+ CCI_DEVICE_1,
+ CCI_DEVICE_MAX,
+};
+
+enum camera_vreg_type {
+ VREG_TYPE_DEFAULT,
+ VREG_TYPE_CUSTOM,
+};
+
+enum cam_sensor_i2c_cmd_type {
+ CAM_SENSOR_I2C_WRITE_RANDOM,
+ CAM_SENSOR_I2C_WRITE_BURST,
+ CAM_SENSOR_I2C_WRITE_SEQ,
+ CAM_SENSOR_I2C_READ,
+ CAM_SENSOR_I2C_POLL
+};
+
+struct common_header {
+ uint16_t first_word;
+ uint8_t third_byte;
+ uint8_t cmd_type;
+};
+
+struct camera_vreg_t {
+ const char *reg_name;
+ int min_voltage;
+ int max_voltage;
+ int op_mode;
+ uint32_t delay;
+ const char *custom_vreg_name;
+ enum camera_vreg_type type;
+};
+
+struct msm_camera_gpio_num_info {
+ uint16_t gpio_num[SENSOR_SEQ_TYPE_MAX];
+ uint8_t valid[SENSOR_SEQ_TYPE_MAX];
+};
+
+struct msm_cam_clk_info {
+ const char *clk_name;
+ long clk_rate;
+ uint32_t delay;
+};
+
+struct msm_pinctrl_info {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+ bool use_pinctrl;
+};
+
+/* One register entry: address, value, per-entry delay, and the mask
+ * used when polling/verifying the register.
+ */
+struct cam_sensor_i2c_reg_array {
+	uint32_t reg_addr;
+	uint32_t reg_data;
+	uint32_t delay;
+	uint32_t data_mask;
+};
+
+/* A batch of register entries plus the address/data widths they share
+ * and a delay applied after the whole batch.
+ */
+struct cam_sensor_i2c_reg_setting {
+	struct cam_sensor_i2c_reg_array *reg_setting;
+	unsigned short size;
+	enum camera_sensor_i2c_type addr_type;
+	enum camera_sensor_i2c_type data_type;
+	unsigned short delay;
+};
+
+struct i2c_settings_list {
+ struct cam_sensor_i2c_reg_setting i2c_settings;
+ enum cam_sensor_i2c_cmd_type op_code;
+ struct list_head list;
+};
+
+struct i2c_settings_array {
+ struct list_head list_head;
+ int32_t is_settings_valid;
+ int64_t request_id;
+};
+
+struct i2c_data_settings {
+ struct i2c_settings_array init_settings;
+ struct i2c_settings_array config_settings;
+ struct i2c_settings_array streamon_settings;
+ struct i2c_settings_array streamoff_settings;
+ struct i2c_settings_array *per_frame;
+};
+
+struct cam_sensor_power_ctrl_t {
+ struct device *dev;
+ struct cam_sensor_power_setting *power_setting;
+ uint16_t power_setting_size;
+ struct cam_sensor_power_setting *power_down_setting;
+ uint16_t power_down_setting_size;
+ struct msm_camera_gpio_num_info *gpio_num_info;
+ struct msm_pinctrl_info pinctrl_info;
+ uint8_t cam_pinctrl_status;
+};
+
+struct cam_camera_slave_info {
+ uint16_t sensor_slave_addr;
+ uint16_t sensor_id_reg_addr;
+ uint16_t sensor_id;
+ uint16_t sensor_id_mask;
+};
+
+struct msm_sensor_init_params {
+ int modes_supported;
+ unsigned int sensor_mount_angle;
+};
+
+enum msm_sensor_camera_id_t {
+ CAMERA_0,
+ CAMERA_1,
+ CAMERA_2,
+ CAMERA_3,
+ MAX_CAMERAS,
+};
+
+struct msm_sensor_id_info_t {
+ unsigned short sensor_id_reg_addr;
+ unsigned short sensor_id;
+ unsigned short sensor_id_mask;
+};
+
+enum msm_sensor_output_format_t {
+ MSM_SENSOR_BAYER,
+ MSM_SENSOR_YCBCR,
+ MSM_SENSOR_META,
+};
+
+struct cam_sensor_power_setting {
+ enum msm_camera_power_seq_type seq_type;
+ unsigned short seq_val;
+ long config_val;
+ unsigned short delay;
+ void *data[10];
+};
+
+struct cam_sensor_board_info {
+ struct cam_camera_slave_info slave_info;
+ int32_t sensor_mount_angle;
+ int32_t secure_mode;
+ int modes_supported;
+ int32_t pos_roll;
+ int32_t pos_yaw;
+ int32_t pos_pitch;
+ int32_t subdev_id[SUB_MODULE_MAX];
+ int32_t subdev_intf[SUB_MODULE_MAX];
+ const char *misc_regulator;
+ struct cam_sensor_power_ctrl_t power_info;
+};
+
+enum msm_camera_vreg_name_t {
+ CAM_VDIG,
+ CAM_VIO,
+ CAM_VANA,
+ CAM_VAF,
+ CAM_V_CUSTOM1,
+ CAM_V_CUSTOM2,
+ CAM_VREG_MAX,
+};
+
+struct msm_camera_gpio_conf {
+ void *cam_gpiomux_conf_tbl;
+ uint8_t cam_gpiomux_conf_tbl_size;
+ struct gpio *cam_gpio_common_tbl;
+ uint8_t cam_gpio_common_tbl_size;
+ struct gpio *cam_gpio_req_tbl;
+ uint8_t cam_gpio_req_tbl_size;
+ uint32_t gpio_no_mux;
+ uint32_t *camera_off_table;
+ uint8_t camera_off_table_size;
+ uint32_t *camera_on_table;
+ uint8_t camera_on_table_size;
+ struct msm_camera_gpio_num_info *gpio_num_info;
+};
+
+#endif /* _CAM_SENSOR_CMN_HEADER_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
new file mode 100644
index 000000000000..713bcaa8643a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -0,0 +1,1944 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "cam_sensor_util.h"
+#include <cam_mem_mgr.h>
+#include "cam_res_mgr_api.h"
+
+#define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
+#define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
+
+#define VALIDATE_VOLTAGE(min, max, config_val) ((config_val) && \
+ (config_val >= min) && (config_val <= max))
+
+/**
+ * cam_sensor_get_i2c_ptr - allocate one settings node plus its
+ * register array and append it to @i2c_reg_settings->list_head.
+ * @i2c_reg_settings: list the node is queued on
+ * @size:             number of cam_sensor_i2c_reg_array entries
+ *
+ * Returns the new node, or NULL on allocation failure (the list is
+ * then left unchanged).  Nodes are released by delete_request().
+ */
+static struct i2c_settings_list*
+	cam_sensor_get_i2c_ptr(struct i2c_settings_array *i2c_reg_settings,
+		uint32_t size)
+{
+	struct i2c_settings_list *tmp;
+
+	/* no cast needed on kzalloc/kcalloc in C; sizeof *ptr keeps the
+	 * allocation tied to the variable's type
+	 */
+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (tmp == NULL)
+		return NULL;
+
+	tmp->i2c_settings.reg_setting =
+		kcalloc(size, sizeof(*tmp->i2c_settings.reg_setting),
+			GFP_KERNEL);
+	if (tmp->i2c_settings.reg_setting == NULL) {
+		kfree(tmp);
+		return NULL;
+	}
+	tmp->i2c_settings.size = size;
+
+	/* link the node only once fully constructed */
+	list_add_tail(&(tmp->list), &(i2c_reg_settings->list_head));
+
+	return tmp;
+}
+
+/**
+ * delete_request - free every settings node queued on @i2c_array
+ * @i2c_array: settings list to empty; must be non-NULL
+ *
+ * Frees each node's register array and the node itself, reinitializes
+ * the list head and clears is_settings_valid.  Returns 0, or -EINVAL
+ * when @i2c_array is NULL.
+ */
+int32_t delete_request(struct i2c_settings_array *i2c_array)
+{
+	struct i2c_settings_list *i2c_list = NULL, *i2c_next = NULL;
+	int32_t rc = 0;
+
+	if (i2c_array == NULL) {
+		CAM_ERR(CAM_SENSOR, "FATAL:: Invalid argument");
+		return -EINVAL;
+	}
+
+	/* _safe variant: nodes are deleted while walking the list */
+	list_for_each_entry_safe(i2c_list, i2c_next,
+		&(i2c_array->list_head), list) {
+		kfree(i2c_list->i2c_settings.reg_setting);
+		list_del(&(i2c_list->list));
+		kfree(i2c_list);
+	}
+	INIT_LIST_HEAD(&(i2c_array->list_head));
+	i2c_array->is_settings_valid = 0;
+
+	return rc;
+}
+
+/**
+ * cam_sensor_handle_delay - fold an unconditional-wait command into the
+ * most recently parsed settings node.
+ * @cmd_buf:          in/out cursor into the command buffer; advanced
+ *                    past the cam_cmd_unconditional_wait payload
+ * @generic_op_code:  wait op code from the command header
+ * @i2c_reg_settings: unused here; kept for parser-callback symmetry
+ * @offset:           registers parsed so far in the node; must be > 0
+ * @byte_cnt:         in/out running byte count, advanced likewise
+ * @list_ptr:         list node of the settings entry the delay applies to
+ *
+ * A HW_UCND wait attaches the delay to the last register written; any
+ * other op code becomes a post-batch delay on the whole node.  Returns
+ * 0, or -EINVAL when @list_ptr is NULL or no settings precede the wait.
+ */
+int32_t cam_sensor_handle_delay(
+	uint32_t **cmd_buf,
+	uint16_t generic_op_code,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint32_t offset, uint32_t *byte_cnt,
+	struct list_head *list_ptr)
+{
+	int32_t rc = 0;
+	struct cam_cmd_unconditional_wait *cmd_uncond_wait =
+		(struct cam_cmd_unconditional_wait *) *cmd_buf;
+	struct i2c_settings_list *i2c_list = NULL;
+
+	if (list_ptr == NULL) {
+		CAM_ERR(CAM_SENSOR, "Invalid list ptr");
+		return -EINVAL;
+	}
+
+	if (offset > 0) {
+		i2c_list =
+			list_entry(list_ptr, struct i2c_settings_list, list);
+		/* HW wait: delay the last written register entry */
+		if (generic_op_code ==
+			CAMERA_SENSOR_WAIT_OP_HW_UCND)
+			i2c_list->i2c_settings.reg_setting[offset - 1].delay =
+				cmd_uncond_wait->delay;
+		else
+			i2c_list->i2c_settings.delay = cmd_uncond_wait->delay;
+		/* step the parse cursor past the consumed wait payload */
+		(*cmd_buf) +=
+			sizeof(
+			struct cam_cmd_unconditional_wait) / sizeof(uint32_t);
+		(*byte_cnt) +=
+			sizeof(
+			struct cam_cmd_unconditional_wait);
+	} else {
+		CAM_ERR(CAM_SENSOR, "Delay Rxed Before any buffer: %d", offset);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_sensor_handle_poll - translate a conditional-wait command into a
+ * single-entry CAM_SENSOR_I2C_POLL settings node.
+ * @cmd_buf:          in/out cursor; advanced past the payload
+ * @i2c_reg_settings: list the new node is queued on
+ * @byte_cnt:         in/out running byte count, advanced likewise
+ * @offset:           out: set to 1 (one register entry consumed)
+ * @list_ptr:         out: list node of the new entry
+ *
+ * The wait's timeout is stored in the entry's delay field.  Returns 0,
+ * or -ENOMEM when the node cannot be allocated.
+ */
+int32_t cam_sensor_handle_poll(
+	uint32_t **cmd_buf,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint32_t *byte_cnt, int32_t *offset,
+	struct list_head **list_ptr)
+{
+	struct i2c_settings_list *i2c_list;
+	int32_t rc = 0;
+	struct cam_cmd_conditional_wait *cond_wait
+		= (struct cam_cmd_conditional_wait *) *cmd_buf;
+
+	i2c_list =
+		cam_sensor_get_i2c_ptr(i2c_reg_settings, 1);
+	if (!i2c_list || !i2c_list->i2c_settings.reg_setting) {
+		CAM_ERR(CAM_SENSOR, "Failed in allocating mem for list");
+		return -ENOMEM;
+	}
+
+	i2c_list->op_code = CAM_SENSOR_I2C_POLL;
+	i2c_list->i2c_settings.data_type =
+		cond_wait->data_type;
+	i2c_list->i2c_settings.addr_type =
+		cond_wait->addr_type;
+	i2c_list->i2c_settings.reg_setting->reg_addr =
+		cond_wait->reg_addr;
+	i2c_list->i2c_settings.reg_setting->reg_data =
+		cond_wait->reg_data;
+	/* poll timeout rides in the entry's delay field */
+	i2c_list->i2c_settings.reg_setting->delay =
+		cond_wait->timeout;
+
+	(*cmd_buf) += sizeof(struct cam_cmd_conditional_wait) /
+		sizeof(uint32_t);
+	(*byte_cnt) += sizeof(struct cam_cmd_conditional_wait);
+
+	*offset = 1;
+	*list_ptr = &(i2c_list->list);
+
+	return rc;
+}
+
+/**
+ * cam_sensor_handle_random_write - queue a random-write command as a
+ * CAM_SENSOR_I2C_WRITE_RANDOM settings node.
+ * @cam_cmd_i2c_random_wr: parsed command payload (header + addr/data pairs)
+ * @i2c_reg_settings:      list the new node is queued on
+ * @cmd_length_in_bytes:   out: size of the payload consumed from the buffer
+ * @offset:                out: number of register entries copied
+ * @list:                  out: list node of the new entry
+ *
+ * Returns 0, or -ENOMEM when allocation fails.
+ */
+int32_t cam_sensor_handle_random_write(
+	struct cam_cmd_i2c_random_wr *cam_cmd_i2c_random_wr,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint16_t *cmd_length_in_bytes, int32_t *offset,
+	struct list_head **list)
+{
+	struct i2c_settings_list *i2c_list;
+	int32_t rc = 0, cnt;
+
+	i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings,
+		cam_cmd_i2c_random_wr->header.count);
+	if (i2c_list == NULL ||
+		i2c_list->i2c_settings.reg_setting == NULL) {
+		CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
+		return -ENOMEM;
+	}
+
+	/* consumed bytes: header plus one payload per register pair */
+	*cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) +
+		sizeof(struct i2c_random_wr_payload) *
+		(cam_cmd_i2c_random_wr->header.count));
+	i2c_list->op_code = CAM_SENSOR_I2C_WRITE_RANDOM;
+	i2c_list->i2c_settings.addr_type =
+		cam_cmd_i2c_random_wr->header.addr_type;
+	i2c_list->i2c_settings.data_type =
+		cam_cmd_i2c_random_wr->header.data_type;
+
+	for (cnt = 0; cnt < (cam_cmd_i2c_random_wr->header.count);
+		cnt++) {
+		i2c_list->i2c_settings.reg_setting[cnt].reg_addr =
+			cam_cmd_i2c_random_wr->random_wr_payload[cnt].reg_addr;
+		i2c_list->i2c_settings.reg_setting[cnt].reg_data =
+			cam_cmd_i2c_random_wr->random_wr_payload[cnt].reg_data;
+		i2c_list->i2c_settings.reg_setting[cnt].data_mask = 0;
+	}
+	*offset = cnt;
+	*list = &(i2c_list->list);
+
+	return rc;
+}
+
+/**
+ * cam_sensor_handle_continuous_write - queue a continuous-write command
+ * as a burst or sequential settings node.
+ * @cam_cmd_i2c_continuous_wr: parsed command payload (header + data)
+ * @i2c_reg_settings:          list the new node is queued on
+ * @cmd_length_in_bytes:       out: size of the payload consumed
+ * @offset:                    out: number of register entries copied
+ * @list:                      out: list node of the new entry
+ *
+ * BRST maps to CAM_SENSOR_I2C_WRITE_BURST, SEQN to _WRITE_SEQ; every
+ * entry shares the command's single start address.  Returns 0, -ENOMEM
+ * on allocation failure, or -EINVAL on an unknown op code (the already
+ * queued node is then released later via delete_request()).
+ */
+static int32_t cam_sensor_handle_continuous_write(
+	struct cam_cmd_i2c_continuous_wr *cam_cmd_i2c_continuous_wr,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint16_t *cmd_length_in_bytes, int32_t *offset,
+	struct list_head **list)
+{
+	struct i2c_settings_list *i2c_list;
+	int32_t rc = 0, cnt;
+
+	i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings,
+		cam_cmd_i2c_continuous_wr->header.count);
+	if (i2c_list == NULL ||
+		i2c_list->i2c_settings.reg_setting == NULL) {
+		CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
+		return -ENOMEM;
+	}
+
+	*cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) +
+		sizeof(cam_cmd_i2c_continuous_wr->reg_addr) +
+		sizeof(struct cam_cmd_read) *
+		(cam_cmd_i2c_continuous_wr->header.count));
+	if (cam_cmd_i2c_continuous_wr->header.op_code ==
+		CAMERA_SENSOR_I2C_OP_CONT_WR_BRST)
+		i2c_list->op_code = CAM_SENSOR_I2C_WRITE_BURST;
+	else if (cam_cmd_i2c_continuous_wr->header.op_code ==
+		CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN)
+		i2c_list->op_code = CAM_SENSOR_I2C_WRITE_SEQ;
+	else
+		return -EINVAL;
+
+	i2c_list->i2c_settings.addr_type =
+		cam_cmd_i2c_continuous_wr->header.addr_type;
+	i2c_list->i2c_settings.data_type =
+		cam_cmd_i2c_continuous_wr->header.data_type;
+	i2c_list->i2c_settings.size =
+		cam_cmd_i2c_continuous_wr->header.count;
+
+	/* all entries target the same start address on the wire */
+	for (cnt = 0; cnt < (cam_cmd_i2c_continuous_wr->header.count);
+		cnt++) {
+		i2c_list->i2c_settings.reg_setting[cnt].reg_addr =
+			cam_cmd_i2c_continuous_wr->reg_addr;
+		i2c_list->i2c_settings.reg_setting[cnt].reg_data =
+			cam_cmd_i2c_continuous_wr->data_read[cnt].reg_data;
+		i2c_list->i2c_settings.reg_setting[cnt].data_mask = 0;
+	}
+	*offset = cnt;
+	*list = &(i2c_list->list);
+
+	return rc;
+}
+
+/**
+ * cam_sensor_handle_slave_info - apply a slave-info command to the bus
+ * master: reprogram the slave address (and, for CCI, the bus speed).
+ * @io_master: target master; its client fields are updated in place
+ * @cmd_buf:   raw cam_cmd_i2c_info payload
+ *
+ * SPI masters carry no slave address, so SPI is a no-op.  Returns 0,
+ * or -EINVAL on NULL arguments or an unknown master type.
+ */
+static int cam_sensor_handle_slave_info(
+	struct camera_io_master *io_master,
+	uint32_t *cmd_buf)
+{
+	int rc = 0;
+	struct cam_cmd_i2c_info *i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+
+	if (io_master == NULL || cmd_buf == NULL) {
+		CAM_ERR(CAM_SENSOR, "Invalid args");
+		return -EINVAL;
+	}
+
+	switch (io_master->master_type) {
+	case CCI_MASTER:
+		/* CCI sid is the 7-bit address (8-bit addr >> 1) */
+		io_master->cci_client->sid = (i2c_info->slave_addr >> 1);
+		io_master->cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+		break;
+
+	case I2C_MASTER:
+		io_master->client->addr = i2c_info->slave_addr;
+		break;
+
+	case SPI_MASTER:
+		break;
+
+	default:
+		CAM_ERR(CAM_SENSOR, "Invalid master type: %d",
+			io_master->master_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_sensor_i2c_command_parser - parse CSL command buffers into an
+ * i2c_settings_array.
+ * @io_master:        target bus; slave-info commands reconfigure it
+ * @i2c_reg_settings: output list the parsed settings are queued on
+ * @cmd_desc:         array of command buffer descriptors
+ * @num_cmd_buffers:  number of descriptors in @cmd_desc
+ *
+ * Handles multiple I2C RD/WR and WAIT cmd formats in one command
+ * buffer, for example, a command buffer of m x RND_WR + 1 x HW_
+ * WAIT + n x RND_WR with num_cmd_buf = 1.  Do not expect RD/WR
+ * with different cmd_type and op_code in one command buffer.
+ *
+ * On error the partially built list remains queued on
+ * @i2c_reg_settings for the caller to release via delete_request().
+ * Returns 0 on success or a negative error code.
+ */
+int cam_sensor_i2c_command_parser(
+	struct camera_io_master *io_master,
+	struct i2c_settings_array *i2c_reg_settings,
+	struct cam_cmd_buf_desc *cmd_desc,
+	int32_t num_cmd_buffers)
+{
+	int16_t rc = 0, i = 0;
+	size_t len_of_buff = 0;
+	uintptr_t generic_ptr;
+	/* NOTE(review): shadowed by same-named locals in the RNDM_WR and
+	 * CONT_WR cases below; only the I2C_INFO case uses this one.
+	 */
+	uint16_t cmd_length_in_bytes = 0;
+
+	for (i = 0; i < num_cmd_buffers; i++) {
+		uint32_t *cmd_buf = NULL;
+		struct common_header *cmm_hdr;
+		uint16_t generic_op_code;
+		uint32_t byte_cnt = 0;
+		uint32_t j = 0;
+		struct list_head *list = NULL;
+
+		/*
+		 * It is not expected the same settings to
+		 * be spread across multiple cmd buffers
+		 */
+		CAM_DBG(CAM_SENSOR, "Total cmd Buf in Bytes: %d",
+			cmd_desc[i].length);
+
+		if (!cmd_desc[i].length)
+			continue;
+
+		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+			&generic_ptr, &len_of_buff);
+		cmd_buf = (uint32_t *)generic_ptr;
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"cmd hdl failed:%d, Err: %d, Buffer_len: %zd",
+				cmd_desc[i].mem_handle, rc, len_of_buff);
+			return rc;
+		}
+		cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+
+		/* walk the buffer command-by-command; each handler
+		 * advances cmd_buf/byte_cnt past what it consumed
+		 */
+		while (byte_cnt < cmd_desc[i].length) {
+			cmm_hdr = (struct common_header *)cmd_buf;
+			generic_op_code = cmm_hdr->third_byte;
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR: {
+				uint16_t cmd_length_in_bytes = 0;
+				struct cam_cmd_i2c_random_wr
+					*cam_cmd_i2c_random_wr =
+					(struct cam_cmd_i2c_random_wr *)cmd_buf;
+
+				rc = cam_sensor_handle_random_write(
+					cam_cmd_i2c_random_wr,
+					i2c_reg_settings,
+					&cmd_length_in_bytes, &j, &list);
+				if (rc < 0) {
+					CAM_ERR(CAM_SENSOR,
+					"Failed in random write %d", rc);
+					return rc;
+				}
+
+				cmd_buf += cmd_length_in_bytes /
+					sizeof(uint32_t);
+				byte_cnt += cmd_length_in_bytes;
+				break;
+			}
+			case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR: {
+				uint16_t cmd_length_in_bytes = 0;
+				struct cam_cmd_i2c_continuous_wr
+					*cam_cmd_i2c_continuous_wr =
+					(struct cam_cmd_i2c_continuous_wr *)
+					cmd_buf;
+
+				rc = cam_sensor_handle_continuous_write(
+					cam_cmd_i2c_continuous_wr,
+					i2c_reg_settings,
+					&cmd_length_in_bytes, &j, &list);
+				if (rc < 0) {
+					CAM_ERR(CAM_SENSOR,
+					"Failed in continuous write %d", rc);
+					return rc;
+				}
+
+				cmd_buf += cmd_length_in_bytes /
+					sizeof(uint32_t);
+				byte_cnt += cmd_length_in_bytes;
+				break;
+			}
+			case CAMERA_SENSOR_CMD_TYPE_WAIT: {
+				if (generic_op_code ==
+					CAMERA_SENSOR_WAIT_OP_HW_UCND ||
+					generic_op_code ==
+						CAMERA_SENSOR_WAIT_OP_SW_UCND) {
+
+					rc = cam_sensor_handle_delay(
+						&cmd_buf, generic_op_code,
+						i2c_reg_settings, j, &byte_cnt,
+						list);
+					if (rc < 0) {
+						CAM_ERR(CAM_SENSOR,
+							"delay hdl failed: %d",
+							rc);
+						return rc;
+					}
+
+				} else if (generic_op_code ==
+					CAMERA_SENSOR_WAIT_OP_COND) {
+					rc = cam_sensor_handle_poll(
+						&cmd_buf, i2c_reg_settings,
+						&byte_cnt, &j, &list);
+					if (rc < 0) {
+						CAM_ERR(CAM_SENSOR,
+							"Random read fail: %d",
+							rc);
+						return rc;
+					}
+				} else {
+					CAM_ERR(CAM_SENSOR,
+						"Wrong Wait Command: %d",
+						generic_op_code);
+					return -EINVAL;
+				}
+				break;
+			}
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO: {
+				rc = cam_sensor_handle_slave_info(
+					io_master, cmd_buf);
+				if (rc) {
+					CAM_ERR(CAM_SENSOR,
+					"Handle slave info failed with rc: %d",
+					rc);
+					return rc;
+				}
+				cmd_length_in_bytes =
+					sizeof(struct cam_cmd_i2c_info);
+				cmd_buf +=
+					cmd_length_in_bytes / sizeof(uint32_t);
+				byte_cnt += cmd_length_in_bytes;
+				break;
+			}
+			default:
+				CAM_ERR(CAM_SENSOR, "Invalid Command Type:%d",
+					cmm_hdr->cmd_type);
+				return -EINVAL;
+			}
+		}
+		i2c_reg_settings->is_settings_valid = 1;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_sensor_util_i2c_apply_setting - apply one parsed settings node to
+ * the bus.
+ * @io_master_info: bus master to write/poll through
+ * @i2c_list:       node produced by cam_sensor_i2c_command_parser()
+ *
+ * Dispatches on op_code: random write, sequential/burst continuous
+ * write, or a per-entry register poll.  Returns 0 on success or the
+ * negative error code of the failing io call; -EINVAL on an unknown
+ * op code.
+ */
+int cam_sensor_util_i2c_apply_setting(
+	struct camera_io_master *io_master_info,
+	struct i2c_settings_list *i2c_list)
+{
+	int32_t rc = 0;
+	uint32_t i, size;
+
+	switch (i2c_list->op_code) {
+	case CAM_SENSOR_I2C_WRITE_RANDOM: {
+		rc = camera_io_dev_write(io_master_info,
+			&(i2c_list->i2c_settings));
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to random write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	}
+	case CAM_SENSOR_I2C_WRITE_SEQ: {
+		/* final arg 0 selects sequential continuous mode */
+		rc = camera_io_dev_write_continuous(
+			io_master_info, &(i2c_list->i2c_settings), 0);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to seq write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	}
+	case CAM_SENSOR_I2C_WRITE_BURST: {
+		/* final arg 1 selects burst continuous mode */
+		rc = camera_io_dev_write_continuous(
+			io_master_info, &(i2c_list->i2c_settings), 1);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to burst write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	}
+	case CAM_SENSOR_I2C_POLL: {
+		size = i2c_list->i2c_settings.size;
+		for (i = 0; i < size; i++) {
+			/* per-entry delay carries the poll timeout */
+			rc = camera_io_dev_poll(
+			io_master_info,
+			i2c_list->i2c_settings.reg_setting[i].reg_addr,
+			i2c_list->i2c_settings.reg_setting[i].reg_data,
+			i2c_list->i2c_settings.reg_setting[i].data_mask,
+			i2c_list->i2c_settings.addr_type,
+			i2c_list->i2c_settings.data_type,
+			i2c_list->i2c_settings.reg_setting[i].delay);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"i2c poll apply setting Fail: %d", rc);
+				return rc;
+			}
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_SENSOR, "Wrong Opcode: %d", i2c_list->op_code);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Match one power setting against the regulator named @vreg_name.
+ * On a match, store the regulator index in ps->seq_val and, when
+ * ps->config_val is a non-zero voltage inside the regulator's
+ * [min, max] range, pin both bounds to it.  When no regulator
+ * matches, mark the setting with INVALID_VREG.  @idx is only used
+ * for debug logging.
+ */
+static void cam_sensor_bind_vreg(struct cam_hw_soc_info *soc_info,
+	int num_vreg, struct cam_sensor_power_setting *ps,
+	int32_t idx, const char *vreg_name)
+{
+	int j;
+
+	for (j = 0; j < num_vreg; j++) {
+		if (strcmp(soc_info->rgltr_name[j], vreg_name) != 0)
+			continue;
+
+		CAM_DBG(CAM_SENSOR, "i: %d j: %d %s", idx, j, vreg_name);
+		ps->seq_val = j;
+
+		if (VALIDATE_VOLTAGE(soc_info->rgltr_min_volt[j],
+			soc_info->rgltr_max_volt[j], ps->config_val)) {
+			soc_info->rgltr_min_volt[j] =
+				soc_info->rgltr_max_volt[j] =
+				ps->config_val;
+		}
+		return;
+	}
+
+	ps->seq_val = INVALID_VREG;
+}
+
+/**
+ * msm_camera_fill_vreg_params - resolve each voltage-type power setting
+ * to its regulator index in @soc_info.
+ * @soc_info:           SoC info holding regulator names and volt ranges
+ * @power_setting:      power sequence to annotate in place
+ * @power_setting_size: number of entries in @power_setting
+ *
+ * Non-regulator seq types (MCLK, GPIOs, reset, ...) are left untouched.
+ * Returns 0 on success, -EINVAL on bad arguments, a bad regulator
+ * count, or an out-of-range seq_type.
+ */
+int32_t msm_camera_fill_vreg_params(
+	struct cam_hw_soc_info *soc_info,
+	struct cam_sensor_power_setting *power_setting,
+	uint16_t power_setting_size)
+{
+	int32_t rc = 0, i = 0;
+	int num_vreg;
+
+	/* Validate input parameters */
+	if (!soc_info || !power_setting) {
+		CAM_ERR(CAM_SENSOR, "failed: soc_info %pK power_setting %pK",
+			soc_info, power_setting);
+		return -EINVAL;
+	}
+
+	num_vreg = soc_info->num_rgltr;
+
+	if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+		CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < power_setting_size; i++) {
+
+		if (power_setting[i].seq_type < SENSOR_MCLK ||
+			power_setting[i].seq_type >= SENSOR_SEQ_TYPE_MAX) {
+			CAM_ERR(CAM_SENSOR, "failed: Invalid Seq type: %d",
+				power_setting[i].seq_type);
+			return -EINVAL;
+		}
+
+		switch (power_setting[i].seq_type) {
+		case SENSOR_VDIG:
+			cam_sensor_bind_vreg(soc_info, num_vreg,
+				&power_setting[i], i, "cam_vdig");
+			break;
+		case SENSOR_VIO:
+			cam_sensor_bind_vreg(soc_info, num_vreg,
+				&power_setting[i], i, "cam_vio");
+			break;
+		case SENSOR_VANA:
+			cam_sensor_bind_vreg(soc_info, num_vreg,
+				&power_setting[i], i, "cam_vana");
+			break;
+		case SENSOR_VAF:
+			cam_sensor_bind_vreg(soc_info, num_vreg,
+				&power_setting[i], i, "cam_vaf");
+			break;
+		case SENSOR_CUSTOM_REG1:
+			cam_sensor_bind_vreg(soc_info, num_vreg,
+				&power_setting[i], i, "cam_v_custom1");
+			break;
+		case SENSOR_CUSTOM_REG2:
+			cam_sensor_bind_vreg(soc_info, num_vreg,
+				&power_setting[i], i, "cam_v_custom2");
+			break;
+		default:
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * cam_sensor_util_request_gpio_table - request or free the sensor's
+ * GPIO request table.
+ * @soc_info: SoC info carrying the gpio_data tables
+ * @gpio_en:  non-zero to request each GPIO, zero to free the table
+ *
+ * Missing GPIO data is treated as success (nothing to do).  Individual
+ * request failures are logged but do not abort the loop.  Returns 0,
+ * -EINVAL on empty/invalid tables, or the last request's error code.
+ */
+int cam_sensor_util_request_gpio_table(
+	struct cam_hw_soc_info *soc_info, int gpio_en)
+{
+	int rc = 0, i = 0;
+	uint8_t size = 0;
+	struct cam_soc_gpio_data *gpio_conf =
+		soc_info->gpio_data;
+	struct gpio *gpio_tbl = NULL;
+
+	if (!gpio_conf) {
+		CAM_INFO(CAM_SENSOR, "No GPIO data");
+		return 0;
+	}
+
+	if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
+		CAM_INFO(CAM_SENSOR, "No GPIO entry");
+		return -EINVAL;
+	}
+
+	gpio_tbl = gpio_conf->cam_gpio_req_tbl;
+	size = gpio_conf->cam_gpio_req_tbl_size;
+
+	if (!gpio_tbl || !size) {
+		CAM_ERR(CAM_SENSOR, "invalid gpio_tbl %pK / size %d",
+			gpio_tbl, size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++) {
+		CAM_DBG(CAM_SENSOR, "i: %d, gpio %d dir %ld", i,
+			gpio_tbl[i].gpio, gpio_tbl[i].flags);
+	}
+
+	if (gpio_en) {
+		for (i = 0; i < size; i++) {
+			rc = cam_res_mgr_gpio_request(soc_info->dev,
+				gpio_tbl[i].gpio,
+				gpio_tbl[i].flags, gpio_tbl[i].label);
+			if (rc) {
+				/*
+				 * After a GPIO request fails, continue to
+				 * apply the remaining gpios; output an
+				 * error message for driver bringup debug
+				 */
+				CAM_ERR(CAM_SENSOR, "gpio %d:%s request fails",
+					gpio_tbl[i].gpio, gpio_tbl[i].label);
+			}
+		}
+	} else {
+		cam_res_mgr_gpio_free_arry(soc_info->dev, gpio_tbl, size);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_sensor_update_power_settings - parse a power-sequence command buffer
+ * @cmd_buf:    buffer of PWR_UP / PWR_DOWN / unconditional-WAIT commands
+ * @cmd_length: length of @cmd_buf in bytes
+ * @power_info: output; power_setting / power_down_setting arrays are
+ *              allocated here and freed again on any parse error
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on a
+ * malformed or truncated command buffer.
+ */
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+	int cmd_length, struct cam_sensor_power_ctrl_t *power_info)
+{
+	int32_t rc = 0, tot_size = 0, last_cmd_type = 0;
+	int32_t i = 0, pwr_up = 0, pwr_down = 0;
+	struct cam_sensor_power_setting *pwr_settings;
+	void *ptr = cmd_buf, *scr;
+	struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
+	struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
+
+	if (!pwr_cmd || !cmd_length) {
+		CAM_ERR(CAM_SENSOR, "Invalid Args: pwr_cmd %pK, cmd_length: %d",
+			pwr_cmd, cmd_length);
+		return -EINVAL;
+	}
+
+	power_info->power_setting_size = 0;
+	power_info->power_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_down_setting_size = 0;
+	power_info->power_down_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		kfree(power_info->power_setting);
+		power_info->power_setting = NULL;
+		power_info->power_setting_size = 0;
+		return -ENOMEM;
+	}
+
+	/* Walk the buffer command by command until every byte is consumed. */
+	while (tot_size < cmd_length) {
+		if (cmm_hdr->cmd_type ==
+			CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
+			struct cam_cmd_power *pwr_cmd =
+				(struct cam_cmd_power *)ptr;
+
+			power_info->power_setting_size += pwr_cmd->count;
+			if (power_info->power_setting_size > MAX_POWER_CONFIG) {
+				CAM_ERR(CAM_SENSOR,
+					"Invalid: power up setting size %d",
+					power_info->power_setting_size);
+				rc = -EINVAL;
+				goto free_power_settings;
+			}
+			scr = ptr + sizeof(struct cam_cmd_power);
+			tot_size = tot_size + sizeof(struct cam_cmd_power);
+
+			if (pwr_cmd->count == 0)
+				CAM_WARN(CAM_SENSOR, "pwr_up_size is zero");
+
+			for (i = 0; i < pwr_cmd->count; i++, pwr_up++) {
+				power_info->power_setting[pwr_up].seq_type =
+				pwr_cmd->power_settings[i].power_seq_type;
+				power_info->power_setting[pwr_up].config_val =
+				pwr_cmd->power_settings[i].config_val_low;
+				power_info->power_setting[pwr_up].delay = 0;
+				/* The first setting is part of cam_cmd_power */
+				if (i) {
+					scr = scr +
+						sizeof(
+						struct cam_power_settings);
+					tot_size = tot_size +
+						sizeof(
+						struct cam_power_settings);
+				}
+				if (tot_size > cmd_length) {
+					CAM_ERR(CAM_SENSOR,
+						"Error: Cmd Buffer is wrong");
+					rc = -EINVAL;
+					goto free_power_settings;
+				}
+				CAM_DBG(CAM_SENSOR,
+				"Seq Type[%d]: %d Config_val: %ld", pwr_up,
+				power_info->power_setting[pwr_up].seq_type,
+				power_info->power_setting[pwr_up].config_val);
+			}
+			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_UP;
+			ptr = (void *) scr;
+			cmm_hdr = (struct common_header *)ptr;
+		} else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) {
+			struct cam_cmd_unconditional_wait *wait_cmd =
+				(struct cam_cmd_unconditional_wait *)ptr;
+			/* A WAIT folds its delay into the previous setting. */
+			if ((wait_cmd->op_code ==
+				CAMERA_SENSOR_WAIT_OP_SW_UCND) &&
+				(last_cmd_type ==
+				CAMERA_SENSOR_CMD_TYPE_PWR_UP)) {
+				if (pwr_up > 0) {
+					pwr_settings =
+					&power_info->power_setting[pwr_up - 1];
+					pwr_settings->delay +=
+						wait_cmd->delay;
+				} else {
+					CAM_ERR(CAM_SENSOR,
+					"Delay is expected only after valid power up setting");
+				}
+			} else if ((wait_cmd->op_code ==
+				CAMERA_SENSOR_WAIT_OP_SW_UCND) &&
+				(last_cmd_type ==
+				CAMERA_SENSOR_CMD_TYPE_PWR_DOWN)) {
+				if (pwr_down > 0) {
+					pwr_settings =
+					&power_info->power_down_setting[
+						pwr_down - 1];
+					pwr_settings->delay +=
+						wait_cmd->delay;
+				} else {
+					CAM_ERR(CAM_SENSOR,
+					"Delay is expected only after valid power up setting");
+				}
+			} else {
+				CAM_DBG(CAM_SENSOR, "Invalid op code: %d",
+					wait_cmd->op_code);
+			}
+
+			tot_size = tot_size +
+				sizeof(struct cam_cmd_unconditional_wait);
+			if (tot_size > cmd_length) {
+				CAM_ERR(CAM_SENSOR, "Command Buffer is wrong");
+				/*
+				 * Free the settings arrays like every other
+				 * error path; a bare return would leak them.
+				 */
+				rc = -EINVAL;
+				goto free_power_settings;
+			}
+			scr = (void *) (wait_cmd);
+			ptr = (void *)
+				(scr +
+				sizeof(struct cam_cmd_unconditional_wait));
+			CAM_DBG(CAM_SENSOR, "ptr: %pK sizeof: %d Next: %pK",
+				scr, (int32_t)sizeof(
+				struct cam_cmd_unconditional_wait), ptr);
+
+			cmm_hdr = (struct common_header *)ptr;
+		} else if (cmm_hdr->cmd_type ==
+			CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
+			struct cam_cmd_power *pwr_cmd =
+				(struct cam_cmd_power *)ptr;
+
+			scr = ptr + sizeof(struct cam_cmd_power);
+			tot_size = tot_size + sizeof(struct cam_cmd_power);
+			power_info->power_down_setting_size += pwr_cmd->count;
+			if (power_info->power_down_setting_size >
+				MAX_POWER_CONFIG) {
+				CAM_ERR(CAM_SENSOR,
+					"Invalid: power down setting size %d",
+					power_info->power_down_setting_size);
+				rc = -EINVAL;
+				goto free_power_settings;
+			}
+
+			if (pwr_cmd->count == 0)
+				CAM_ERR(CAM_SENSOR, "pwr_down size is zero");
+
+			for (i = 0; i < pwr_cmd->count; i++, pwr_down++) {
+				pwr_settings =
+				&power_info->power_down_setting[pwr_down];
+				pwr_settings->seq_type =
+				pwr_cmd->power_settings[i].power_seq_type;
+				pwr_settings->config_val =
+				pwr_cmd->power_settings[i].config_val_low;
+				power_info->power_down_setting[pwr_down].delay
+					= 0;
+				if (i) {
+					scr = scr +
+						sizeof(
+						struct cam_power_settings);
+					tot_size =
+						tot_size +
+						sizeof(
+						struct cam_power_settings);
+				}
+				if (tot_size > cmd_length) {
+					CAM_ERR(CAM_SENSOR,
+						"Command Buffer is wrong");
+					rc = -EINVAL;
+					goto free_power_settings;
+				}
+				CAM_DBG(CAM_SENSOR,
+					"Seq Type[%d]: %d Config_val: %ld",
+					pwr_down, pwr_settings->seq_type,
+					pwr_settings->config_val);
+			}
+			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_DOWN;
+			ptr = (void *) scr;
+			cmm_hdr = (struct common_header *)ptr;
+		} else {
+			CAM_ERR(CAM_SENSOR,
+				"Error: Un expected Header Type: %d",
+				cmm_hdr->cmd_type);
+			rc = -EINVAL;
+			goto free_power_settings;
+		}
+	}
+
+	return rc;
+free_power_settings:
+	kfree(power_info->power_down_setting);
+	kfree(power_info->power_setting);
+	power_info->power_down_setting = NULL;
+	power_info->power_setting = NULL;
+	power_info->power_down_setting_size = 0;
+	power_info->power_setting_size = 0;
+	return rc;
+}
+
+/*
+ * cam_get_dt_power_setting_data - build power up/down tables from DT
+ * @of_node:    sensor device-tree node
+ * @soc_info:   SOC info (unused here, kept for the established interface)
+ * @power_info: output; power_setting and power_down_setting are allocated
+ *              from the qcom,cam-power-seq-* properties
+ *
+ * The power-down table is the power-up table in reverse order.
+ * Returns 0 on success (or when the property is absent), negative errno
+ * otherwise.  On failure no dangling pointers are left in @power_info.
+ */
+int cam_get_dt_power_setting_data(struct device_node *of_node,
+	struct cam_hw_soc_info *soc_info,
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0, i;
+	int count = 0;
+	const char *seq_name = NULL;
+	uint32_t *array = NULL;
+	struct cam_sensor_power_setting *ps;
+	int c, end;
+
+	if (!power_info)
+		return -EINVAL;
+
+	count = of_property_count_strings(of_node, "qcom,cam-power-seq-type");
+
+	CAM_DBG(CAM_SENSOR, "qcom,cam-power-seq-type count %d", count);
+
+	/*
+	 * Property absent or empty: nothing to parse.  Do not publish a
+	 * negative errno from of_property_count_strings() as a "size".
+	 */
+	if (count <= 0)
+		return 0;
+
+	power_info->power_setting_size = count;
+
+	ps = kcalloc(count, sizeof(*ps), GFP_KERNEL);
+	if (!ps) {
+		power_info->power_setting_size = 0;
+		return -ENOMEM;
+	}
+	power_info->power_setting = ps;
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,cam-power-seq-type", i, &seq_name);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "failed");
+			goto ERROR1;
+		}
+		CAM_DBG(CAM_SENSOR, "seq_name[%d] = %s", i, seq_name);
+		if (!strcmp(seq_name, "cam_vio")) {
+			ps[i].seq_type = SENSOR_VIO;
+		} else if (!strcmp(seq_name, "cam_vana")) {
+			ps[i].seq_type = SENSOR_VANA;
+		} else if (!strcmp(seq_name, "cam_clk")) {
+			ps[i].seq_type = SENSOR_MCLK;
+		} else {
+			CAM_ERR(CAM_SENSOR, "unrecognized seq-type %s",
+				seq_name);
+			rc = -EILSEQ;
+			goto ERROR1;
+		}
+		CAM_DBG(CAM_SENSOR, "seq_type[%d] %d", i, ps[i].seq_type);
+	}
+
+	array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+	if (!array) {
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-cfg-val",
+		array, count);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed ");
+		goto ERROR2;
+	}
+
+	for (i = 0; i < count; i++) {
+		ps[i].config_val = array[i];
+		CAM_DBG(CAM_SENSOR, "power_setting[%d].config_val = %ld", i,
+			ps[i].config_val);
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-delay",
+		array, count);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed");
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		ps[i].delay = array[i];
+		CAM_DBG(CAM_SENSOR, "power_setting[%d].delay = %d", i,
+			ps[i].delay);
+	}
+	kfree(array);
+
+	power_info->power_down_setting =
+		kcalloc(count, sizeof(*ps), GFP_KERNEL);
+
+	if (!power_info->power_down_setting) {
+		CAM_ERR(CAM_SENSOR, "failed");
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	power_info->power_down_setting_size = count;
+
+	/* Power down is the power-up sequence replayed backwards. */
+	end = count - 1;
+
+	for (c = 0; c < count; c++) {
+		power_info->power_down_setting[c] = ps[end];
+		end--;
+	}
+	return rc;
+ERROR2:
+	kfree(array);
+ERROR1:
+	/* ps was published as power_info->power_setting; clear it too. */
+	kfree(ps);
+	power_info->power_setting = NULL;
+	power_info->power_setting_size = 0;
+	return rc;
+}
+
+/*
+ * cam_sensor_util_init_gpio_pin_tbl - map named DT gpio-* indices to GPIOs
+ * @soc_info:       SOC info whose gpio_data common table is indexed
+ * @pgpio_num_info: output; allocated here, freed (and NULLed) on failure
+ *
+ * Each optional "gpio-<name>" DT property holds an index into the common
+ * GPIO table; for every property present, the matching SENSOR_* slot in
+ * the returned table is filled in and marked valid.
+ *
+ * Returns 0 on success, negative errno on a malformed property.
+ */
+int cam_sensor_util_init_gpio_pin_tbl(
+	struct cam_hw_soc_info *soc_info,
+	struct msm_camera_gpio_num_info **pgpio_num_info)
+{
+	int rc = 0, val = 0;
+	uint32_t gpio_array_size;
+	struct device_node *of_node = NULL;
+	struct cam_soc_gpio_data *gconf = NULL;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+
+	if (!soc_info->dev) {
+		CAM_ERR(CAM_SENSOR, "device node NULL");
+		return -EINVAL;
+	}
+
+	of_node = soc_info->dev->of_node;
+
+	gconf = soc_info->gpio_data;
+	if (!gconf) {
+		CAM_ERR(CAM_SENSOR, "No gpio_common_table is found");
+		return -EINVAL;
+	}
+
+	if (!gconf->cam_gpio_common_tbl) {
+		CAM_ERR(CAM_SENSOR, "gpio_common_table is not initialized");
+		return -EINVAL;
+	}
+
+	gpio_array_size = gconf->cam_gpio_common_tbl_size;
+
+	if (!gpio_array_size) {
+		CAM_ERR(CAM_SENSOR, "invalid size of gpio table");
+		return -EINVAL;
+	}
+
+	*pgpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
+		GFP_KERNEL);
+	if (!*pgpio_num_info)
+		return -ENOMEM;
+	gpio_num_info = *pgpio_num_info;
+
+	/*
+	 * of_property_read_u32() returns -EINVAL when the property is
+	 * absent, which is fine for these optional entries; any other
+	 * negative rc, or an out-of-range index, is a hard error.
+	 */
+	rc = of_property_read_u32(of_node, "gpio-vana", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "read gpio-vana failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-vana invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_VANA] =
+				gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VANA] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-vana %d",
+			gpio_num_info->gpio_num[SENSOR_VANA]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-vio", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "read gpio-vio failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-vio invalid %d", val);
+			/* was missing: propagate an error like the others */
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_VIO] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VIO] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-vio %d",
+			gpio_num_info->gpio_num[SENSOR_VIO]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-vaf", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "read gpio-vaf failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-vaf invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_VAF] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VAF] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-vaf %d",
+			gpio_num_info->gpio_num[SENSOR_VAF]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-vdig", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "read gpio-vdig failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-vdig invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_VDIG] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VDIG] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-vdig %d",
+			gpio_num_info->gpio_num[SENSOR_VDIG]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-reset", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "read gpio-reset failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-reset invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_RESET] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_RESET] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-reset %d",
+			gpio_num_info->gpio_num[SENSOR_RESET]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-standby", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-standby failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-standby invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_STANDBY] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_STANDBY] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-standby %d",
+			gpio_num_info->gpio_num[SENSOR_STANDBY]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-af-pwdm", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-af-pwdm failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-af-pwdm invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_VAF_PWDM] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VAF_PWDM] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-af-pwdm %d",
+			gpio_num_info->gpio_num[SENSOR_VAF_PWDM]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-custom1", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-custom1 failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-custom1 invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-custom1 %d",
+			gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-custom2", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-custom2 failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-custom2 invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-custom2 %d",
+			gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]);
+	} else {
+		/* gpio-custom2 is optional; absence is not a failure. */
+		rc = 0;
+	}
+
+	return rc;
+
+free_gpio_info:
+	kfree(gpio_num_info);
+	/* Clear the caller's pointer as well; it aliased the freed buffer. */
+	*pgpio_num_info = NULL;
+	return rc;
+}
+
+/*
+ * msm_camera_pinctrl_init - look up the sensor's pinctrl handle and states
+ * @sensor_pctrl: filled with the pinctrl handle plus the "default"
+ *                (active) and "sleep" (suspend) state handles
+ * @dev:          device whose pinctrl node is queried
+ *
+ * Returns 0 on success, -EINVAL if the handle or either named state is
+ * missing.  The handle is devm-managed, so no explicit put on failure.
+ */
+int msm_camera_pinctrl_init(
+	struct msm_pinctrl_info *sensor_pctrl, struct device *dev)
+{
+
+	sensor_pctrl->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) {
+		CAM_DBG(CAM_SENSOR, "Getting pinctrl handle failed");
+		return -EINVAL;
+	}
+	sensor_pctrl->gpio_state_active =
+		pinctrl_lookup_state(sensor_pctrl->pinctrl,
+				CAM_SENSOR_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_active)) {
+		CAM_ERR(CAM_SENSOR,
+			"Failed to get the active state pinctrl handle");
+		return -EINVAL;
+	}
+	sensor_pctrl->gpio_state_suspend
+		= pinctrl_lookup_state(sensor_pctrl->pinctrl,
+				CAM_SENSOR_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_suspend)) {
+		CAM_ERR(CAM_SENSOR,
+			"Failed to get the suspend state pinctrl handle");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_sensor_bob_pwm_mode_switch - toggle the BoB regulator load mode
+ * @soc_info:    SOC info holding the regulator array
+ * @bob_reg_idx: index of the BoB regulator in soc_info->rgltr[]
+ * @flag:        true applies the configured op-mode load, false sets 0
+ *
+ * A set_load failure is only warned about; the last rc is returned.
+ * No-op (returns 0) when the regulator handle is NULL.
+ */
+int cam_sensor_bob_pwm_mode_switch(struct cam_hw_soc_info *soc_info,
+	int bob_reg_idx, bool flag)
+{
+	int rc = 0;
+	uint32_t op_current =
+		(flag == true) ? soc_info->rgltr_op_mode[bob_reg_idx] : 0;
+
+	if (soc_info->rgltr[bob_reg_idx] != NULL) {
+		rc = regulator_set_load(soc_info->rgltr[bob_reg_idx],
+			op_current);
+		if (rc)
+			CAM_WARN(CAM_SENSOR,
+				"BoB PWM SetLoad failed rc: %d", rc);
+	}
+
+	return rc;
+}
+
+/*
+ * msm_cam_sensor_handle_reg_gpio - drive the GPIO paired with a VREG step
+ * @seq_type:      power sequence type, used directly as the GPIO table index
+ * @gpio_num_info: per-sensor GPIO lookup table (may be NULL: no-op)
+ * @val:           level to drive on the GPIO
+ *
+ * Always returns 0; a missing table or an invalid slot is silently skipped.
+ * NOTE(review): seq_type indexes valid[]/gpio_num[] without a range check —
+ * callers appear to pass only SENSOR_* enum values; confirm upper bound.
+ */
+int msm_cam_sensor_handle_reg_gpio(int seq_type,
+	struct msm_camera_gpio_num_info *gpio_num_info, int val)
+{
+	int gpio_offset = -1;
+
+	if (!gpio_num_info) {
+		CAM_INFO(CAM_SENSOR, "Input Parameters are not proper");
+		return 0;
+	}
+
+	CAM_DBG(CAM_SENSOR, "Seq type: %d, config: %d", seq_type, val);
+
+	gpio_offset = seq_type;
+
+	if (gpio_num_info->valid[gpio_offset] == 1) {
+		CAM_DBG(CAM_SENSOR, "VALID GPIO offset: %d, seqtype: %d",
+			 gpio_offset, seq_type);
+		cam_res_mgr_gpio_set_value(
+			gpio_num_info->gpio_num
+			[gpio_offset], val);
+	}
+
+	return 0;
+}
+
+/*
+ * cam_config_mclk_reg - disable and release the MCLK ("cam_clk") regulator
+ * @ctrl:     power control block (power_down_setting[index] selects the
+ *            seq_type whose matching power-up setting is located)
+ * @soc_info: SOC info holding the regulator array
+ * @index:    index into ctrl->power_down_setting
+ *
+ * Only acts on regulators named "cam_clk", and only when a power-up
+ * setting with the same seq_type exists (i.e. the regulator was enabled
+ * by power-up).  Returns 0 or the regulator-disable error.
+ */
+static int cam_config_mclk_reg(struct cam_sensor_power_ctrl_t *ctrl,
+	struct cam_hw_soc_info *soc_info, int32_t index)
+{
+	int32_t num_vreg = 0, j = 0, rc = 0, idx = 0;
+	struct cam_sensor_power_setting *ps = NULL;
+	struct cam_sensor_power_setting *pd = NULL;
+
+	num_vreg = soc_info->num_rgltr;
+
+	pd = &ctrl->power_down_setting[index];
+
+	for (j = 0; j < num_vreg; j++) {
+		if (!strcmp(soc_info->rgltr_name[j], "cam_clk")) {
+			/* Find the power-up setting that enabled this rail. */
+			ps = NULL;
+			for (idx = 0; idx < ctrl->power_setting_size; idx++) {
+				if (ctrl->power_setting[idx].seq_type ==
+					pd->seq_type) {
+					ps = &ctrl->power_setting[idx];
+					break;
+				}
+			}
+
+			if (ps != NULL) {
+				CAM_DBG(CAM_SENSOR, "Disable MCLK Regulator");
+				rc = cam_soc_util_regulator_disable(
+					soc_info->rgltr[j],
+					soc_info->rgltr_name[j],
+					soc_info->rgltr_min_volt[j],
+					soc_info->rgltr_max_volt[j],
+					soc_info->rgltr_op_mode[j],
+					soc_info->rgltr_delay[j]);
+
+				if (rc) {
+					CAM_ERR(CAM_SENSOR,
+						"MCLK REG DISALBE FAILED: %d",
+						rc);
+					return rc;
+				}
+
+				/* Stash the handle, then drop our reference. */
+				ps->data[0] =
+					soc_info->rgltr[j];
+
+				regulator_put(
+					soc_info->rgltr[j]);
+				soc_info->rgltr[j] = NULL;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * cam_sensor_core_power_up - execute the sensor's power-up sequence
+ * @ctrl:     power control block holding the ordered power_setting array,
+ *            GPIO lookup table and pinctrl info
+ * @soc_info: SOC info with clocks and regulators
+ *
+ * Applies each power_setting entry in order (MCLK, GPIOs, regulators),
+ * honouring per-step delays.  On any failure, already-applied steps are
+ * unwound in reverse order before returning.
+ * Returns 0 on success, negative errno otherwise.
+ */
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
+		struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0, index = 0, no_gpio = 0, ret = 0, num_vreg, j = 0, i = 0;
+	int32_t vreg_idx = -1;
+	struct cam_sensor_power_setting *power_setting = NULL;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+
+	CAM_DBG(CAM_SENSOR, "Enter");
+	if (!ctrl) {
+		CAM_ERR(CAM_SENSOR, "Invalid ctrl handle");
+		return -EINVAL;
+	}
+
+	gpio_num_info = ctrl->gpio_num_info;
+	num_vreg = soc_info->num_rgltr;
+
+	if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+		CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
+		return -EINVAL;
+	}
+
+	if (soc_info->use_shared_clk)
+		cam_res_mgr_shared_clk_config(true);
+
+	ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
+	if (ret < 0) {
+		/* Some sensor subdev no pinctrl. */
+		CAM_DBG(CAM_SENSOR, "Initialization of pinctrl failed");
+		ctrl->cam_pinctrl_status = 0;
+	} else {
+		ctrl->cam_pinctrl_status = 1;
+	}
+
+	if (cam_res_mgr_shared_pinctrl_init()) {
+		CAM_ERR(CAM_SENSOR,
+			"Failed to init shared pinctrl");
+		return -EINVAL;
+	}
+
+	/* GPIO request failure is remembered, but only fatal for GPIO steps */
+	rc = cam_sensor_util_request_gpio_table(soc_info, 1);
+	if (rc < 0)
+		no_gpio = rc;
+
+	if (ctrl->cam_pinctrl_status) {
+		ret = pinctrl_select_state(
+			ctrl->pinctrl_info.pinctrl,
+			ctrl->pinctrl_info.gpio_state_active);
+		if (ret)
+			CAM_ERR(CAM_SENSOR, "cannot set pin to active state");
+	}
+
+	ret = cam_res_mgr_shared_pinctrl_select_state(true);
+	if (ret)
+		CAM_ERR(CAM_SENSOR,
+			"Cannot set shared pin to active state");
+
+	CAM_DBG(CAM_SENSOR, "power setting size: %d", ctrl->power_setting_size);
+
+	for (index = 0; index < ctrl->power_setting_size; index++) {
+		CAM_DBG(CAM_SENSOR, "index: %d", index);
+		power_setting = &ctrl->power_setting[index];
+		if (!power_setting) {
+			CAM_ERR(CAM_SENSOR,
+				"Invalid power up settings for index %d",
+				index);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_SENSOR, "seq_type %d", power_setting->seq_type);
+
+		switch (power_setting->seq_type) {
+		case SENSOR_MCLK:
+			/*
+			 * NOTE(review): rc may still be 0 when jumping to
+			 * power_up_failed here, so the unwind path can
+			 * return 0 — confirm callers treat 0 as success.
+			 */
+			if (power_setting->seq_val >= soc_info->num_clk) {
+				CAM_ERR(CAM_SENSOR, "clk index %d >= max %u",
+					power_setting->seq_val,
+					soc_info->num_clk);
+				goto power_up_failed;
+			}
+			/* Enable the "cam_clk" rail before the clocks. */
+			for (j = 0; j < num_vreg; j++) {
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_clk")) {
+					CAM_DBG(CAM_SENSOR,
+						"Enable cam_clk: %d", j);
+
+					soc_info->rgltr[j] =
+					regulator_get(
+						soc_info->dev,
+						soc_info->rgltr_name[j]);
+
+					if (IS_ERR_OR_NULL(
+						soc_info->rgltr[j])) {
+						rc = PTR_ERR(
+							soc_info->rgltr[j]);
+						rc = rc ? rc : -EINVAL;
+						CAM_ERR(CAM_SENSOR,
+							"vreg %s %d",
+							soc_info->rgltr_name[j],
+							rc);
+						soc_info->rgltr[j] = NULL;
+						goto power_up_failed;
+					}
+
+					rc =  cam_soc_util_regulator_enable(
+					soc_info->rgltr[j],
+					soc_info->rgltr_name[j],
+					soc_info->rgltr_min_volt[j],
+					soc_info->rgltr_max_volt[j],
+					soc_info->rgltr_op_mode[j],
+					soc_info->rgltr_delay[j]);
+					if (rc) {
+						CAM_ERR(CAM_SENSOR,
+							"Reg enable failed");
+						goto power_up_failed;
+					}
+					power_setting->data[0] =
+						soc_info->rgltr[j];
+				}
+			}
+			/* config_val, when set, overrides the clock rate. */
+			if (power_setting->config_val)
+				soc_info->clk_rate[0][power_setting->seq_val] =
+					power_setting->config_val;
+
+			for (j = 0; j < soc_info->num_clk; j++) {
+				rc = cam_soc_util_clk_enable(soc_info->clk[j],
+					soc_info->clk_name[j],
+					soc_info->clk_rate[0][j]);
+				if (rc)
+					break;
+			}
+
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR, "clk enable failed");
+				goto power_up_failed;
+			}
+			break;
+		case SENSOR_RESET:
+		case SENSOR_STANDBY:
+		case SENSOR_CUSTOM_GPIO1:
+		case SENSOR_CUSTOM_GPIO2:
+			/*
+			 * NOTE(review): this early return skips the
+			 * power_up_failed unwind (pinctrl, clocks, rails
+			 * already applied stay on) — confirm intended.
+			 */
+			if (no_gpio) {
+				CAM_ERR(CAM_SENSOR, "request gpio failed");
+				return no_gpio;
+			}
+			if (!gpio_num_info) {
+				CAM_ERR(CAM_SENSOR, "Invalid gpio_num_info");
+				goto power_up_failed;
+			}
+			CAM_DBG(CAM_SENSOR, "gpio set val %d",
+				gpio_num_info->gpio_num
+				[power_setting->seq_type]);
+
+			rc = msm_cam_sensor_handle_reg_gpio(
+				power_setting->seq_type,
+				gpio_num_info,
+				(int) power_setting->config_val);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"Error in handling VREG GPIO");
+				goto power_up_failed;
+			}
+			break;
+		case SENSOR_VANA:
+		case SENSOR_VDIG:
+		case SENSOR_VIO:
+		case SENSOR_VAF:
+		case SENSOR_VAF_PWDM:
+		case SENSOR_CUSTOM_REG1:
+		case SENSOR_CUSTOM_REG2:
+			/* INVALID_VREG marks a rail absent from DT: skip. */
+			if (power_setting->seq_val == INVALID_VREG)
+				break;
+
+			if (power_setting->seq_val >= CAM_VREG_MAX) {
+				CAM_ERR(CAM_SENSOR, "vreg index %d >= max %d",
+					power_setting->seq_val,
+					CAM_VREG_MAX);
+				goto power_up_failed;
+			}
+			if (power_setting->seq_val < num_vreg) {
+				CAM_DBG(CAM_SENSOR, "Enable Regulator");
+				vreg_idx = power_setting->seq_val;
+
+				soc_info->rgltr[vreg_idx] =
+					regulator_get(soc_info->dev,
+						soc_info->rgltr_name[vreg_idx]);
+				if (IS_ERR_OR_NULL(
+					soc_info->rgltr[vreg_idx])) {
+					rc = PTR_ERR(soc_info->rgltr[vreg_idx]);
+					rc = rc ? rc : -EINVAL;
+
+					CAM_ERR(CAM_SENSOR, "%s get failed %d",
+						soc_info->rgltr_name[vreg_idx],
+						rc);
+
+					soc_info->rgltr[vreg_idx] = NULL;
+					goto power_up_failed;
+				}
+
+				rc =  cam_soc_util_regulator_enable(
+					soc_info->rgltr[vreg_idx],
+					soc_info->rgltr_name[vreg_idx],
+					soc_info->rgltr_min_volt[vreg_idx],
+					soc_info->rgltr_max_volt[vreg_idx],
+					soc_info->rgltr_op_mode[vreg_idx],
+					soc_info->rgltr_delay[vreg_idx]);
+				if (rc) {
+					CAM_ERR(CAM_SENSOR,
+						"Reg Enable failed for %s",
+						soc_info->rgltr_name[vreg_idx]);
+					goto power_up_failed;
+				}
+				power_setting->data[0] =
+						soc_info->rgltr[vreg_idx];
+			} else {
+				CAM_ERR(CAM_SENSOR, "usr_idx:%d dts_idx:%d",
+					power_setting->seq_val, num_vreg);
+			}
+
+			rc = msm_cam_sensor_handle_reg_gpio(
+				power_setting->seq_type,
+				gpio_num_info, 1);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"Error in handling VREG GPIO");
+				goto power_up_failed;
+			}
+			break;
+		default:
+			CAM_ERR(CAM_SENSOR, "error power seq type %d",
+				power_setting->seq_type);
+			break;
+		}
+		/* msleep for long delays, usleep_range for short ones. */
+		if (power_setting->delay > 20)
+			msleep(power_setting->delay);
+		else if (power_setting->delay)
+			usleep_range(power_setting->delay * 1000,
+				(power_setting->delay * 1000) + 1000);
+	}
+
+	ret = cam_res_mgr_shared_pinctrl_post_init();
+	if (ret)
+		CAM_ERR(CAM_SENSOR,
+			"Failed to post init shared pinctrl");
+
+	return 0;
+power_up_failed:
+	CAM_ERR(CAM_SENSOR, "failed");
+	/* Unwind the steps already applied, newest first. */
+	for (index--; index >= 0; index--) {
+		CAM_DBG(CAM_SENSOR, "index %d",  index);
+		power_setting = &ctrl->power_setting[index];
+		CAM_DBG(CAM_SENSOR, "type %d",
+			power_setting->seq_type);
+		switch (power_setting->seq_type) {
+		case SENSOR_MCLK:
+			for (i = soc_info->num_clk - 1; i >= 0; i--) {
+				cam_soc_util_clk_disable(soc_info->clk[i],
+					soc_info->clk_name[i]);
+			}
+			ret = cam_config_mclk_reg(ctrl, soc_info, index);
+			if (ret < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"config clk reg failed rc: %d", ret);
+				continue;
+			}
+			break;
+		case SENSOR_RESET:
+		case SENSOR_STANDBY:
+		case SENSOR_CUSTOM_GPIO1:
+		case SENSOR_CUSTOM_GPIO2:
+			if (!gpio_num_info)
+				continue;
+			if (!gpio_num_info->valid
+				[power_setting->seq_type])
+				continue;
+			cam_res_mgr_gpio_set_value(
+				gpio_num_info->gpio_num
+				[power_setting->seq_type], GPIOF_OUT_INIT_LOW);
+			break;
+		case SENSOR_VANA:
+		case SENSOR_VDIG:
+		case SENSOR_VIO:
+		case SENSOR_VAF:
+		case SENSOR_VAF_PWDM:
+		case SENSOR_CUSTOM_REG1:
+		case SENSOR_CUSTOM_REG2:
+			if (power_setting->seq_val < num_vreg) {
+				CAM_DBG(CAM_SENSOR, "Disable Regulator");
+				vreg_idx = power_setting->seq_val;
+
+				rc =  cam_soc_util_regulator_disable(
+					soc_info->rgltr[vreg_idx],
+					soc_info->rgltr_name[vreg_idx],
+					soc_info->rgltr_min_volt[vreg_idx],
+					soc_info->rgltr_max_volt[vreg_idx],
+					soc_info->rgltr_op_mode[vreg_idx],
+					soc_info->rgltr_delay[vreg_idx]);
+
+				if (rc) {
+					CAM_ERR(CAM_SENSOR,
+					"Fail to disalbe reg: %s",
+					soc_info->rgltr_name[vreg_idx]);
+					soc_info->rgltr[vreg_idx] = NULL;
+					msm_cam_sensor_handle_reg_gpio(
+						power_setting->seq_type,
+						gpio_num_info,
+						GPIOF_OUT_INIT_LOW);
+					continue;
+				}
+				power_setting->data[0] =
+						soc_info->rgltr[vreg_idx];
+
+				regulator_put(soc_info->rgltr[vreg_idx]);
+				soc_info->rgltr[vreg_idx] = NULL;
+			} else {
+				CAM_ERR(CAM_SENSOR, "seq_val:%d > num_vreg: %d",
+					power_setting->seq_val, num_vreg);
+			}
+
+			msm_cam_sensor_handle_reg_gpio(power_setting->seq_type,
+				gpio_num_info, GPIOF_OUT_INIT_LOW);
+
+			break;
+		default:
+			CAM_ERR(CAM_SENSOR, "error power seq type %d",
+				power_setting->seq_type);
+			break;
+		}
+		if (power_setting->delay > 20) {
+			msleep(power_setting->delay);
+		} else if (power_setting->delay) {
+			usleep_range(power_setting->delay * 1000,
+				(power_setting->delay * 1000) + 1000);
+		}
+	}
+
+	if (ctrl->cam_pinctrl_status) {
+		ret = pinctrl_select_state(
+				ctrl->pinctrl_info.pinctrl,
+				ctrl->pinctrl_info.gpio_state_suspend);
+		if (ret)
+			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
+		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+	}
+
+	if (soc_info->use_shared_clk)
+		cam_res_mgr_shared_clk_config(false);
+
+	cam_res_mgr_shared_pinctrl_select_state(false);
+	cam_res_mgr_shared_pinctrl_put();
+
+	ctrl->cam_pinctrl_status = 0;
+
+	cam_sensor_util_request_gpio_table(soc_info, 0);
+
+	return rc;
+}
+
+/*
+ * msm_camera_get_power_settings - find the power-up setting matching a
+ * (seq_type, seq_val) pair from the power-down sequence
+ *
+ * Returns a pointer into ctrl->power_setting, or NULL when no power-up
+ * entry matches (i.e. the rail was never enabled on the way up).
+ */
+static struct cam_sensor_power_setting*
+msm_camera_get_power_settings(struct cam_sensor_power_ctrl_t *ctrl,
+				enum msm_camera_power_seq_type seq_type,
+				uint16_t seq_val)
+{
+	struct cam_sensor_power_setting *power_setting, *ps = NULL;
+	int idx;
+
+	for (idx = 0; idx < ctrl->power_setting_size; idx++) {
+		power_setting = &ctrl->power_setting[idx];
+		if (power_setting->seq_type == seq_type &&
+			power_setting->seq_val ==  seq_val) {
+			ps = power_setting;
+			return ps;
+		}
+
+	}
+
+	return ps;
+}
+
+/*
+ * cam_sensor_util_power_down - execute the sensor's power-down sequence
+ * @ctrl:     power control block holding the ordered power_down_setting
+ *            array, GPIO lookup table and pinctrl info
+ * @soc_info: SOC info with clocks and regulators
+ *
+ * Applies each power_down_setting entry in order (clock gating, GPIOs,
+ * regulator disable), honouring per-step delays, then releases pinctrl
+ * and the GPIO table.  Returns 0 on success, -EINVAL on bad arguments.
+ */
+int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+		struct cam_hw_soc_info *soc_info)
+{
+	int index = 0, ret = 0, num_vreg = 0, i;
+	struct cam_sensor_power_setting *pd = NULL;
+	struct cam_sensor_power_setting *ps = NULL;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+
+	CAM_DBG(CAM_SENSOR, "Enter");
+	if (!ctrl || !soc_info) {
+		CAM_ERR(CAM_SENSOR, "failed ctrl %pK",  ctrl);
+		return -EINVAL;
+	}
+
+	gpio_num_info = ctrl->gpio_num_info;
+	num_vreg = soc_info->num_rgltr;
+
+	if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+		CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
+		return -EINVAL;
+	}
+
+	if (ctrl->power_down_setting_size > MAX_POWER_CONFIG) {
+		CAM_ERR(CAM_SENSOR, "Invalid: power setting size %d",
+			ctrl->power_setting_size);
+		return -EINVAL;
+	}
+
+	for (index = 0; index < ctrl->power_down_setting_size; index++) {
+		CAM_DBG(CAM_SENSOR, "power_down_index %d",  index);
+		pd = &ctrl->power_down_setting[index];
+		if (!pd) {
+			CAM_ERR(CAM_SENSOR,
+				"Invalid power down settings for index %d",
+				index);
+			return -EINVAL;
+		}
+
+		ps = NULL;
+		CAM_DBG(CAM_SENSOR, "seq_type %d",  pd->seq_type);
+		switch (pd->seq_type) {
+		case SENSOR_MCLK:
+			for (i = soc_info->num_clk - 1; i >= 0; i--) {
+				cam_soc_util_clk_disable(soc_info->clk[i],
+					soc_info->clk_name[i]);
+			}
+
+			ret = cam_config_mclk_reg(ctrl, soc_info, index);
+			if (ret < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"config clk reg failed rc: %d", ret);
+				continue;
+			}
+			break;
+		case SENSOR_RESET:
+		case SENSOR_STANDBY:
+		case SENSOR_CUSTOM_GPIO1:
+		case SENSOR_CUSTOM_GPIO2:
+
+			/*
+			 * NOTE(review): gpio_num_info is dereferenced here
+			 * without the NULL check that power-up performs —
+			 * confirm it cannot be NULL on this path.
+			 */
+			if (!gpio_num_info->valid[pd->seq_type])
+				continue;
+
+			cam_res_mgr_gpio_set_value(
+				gpio_num_info->gpio_num
+				[pd->seq_type],
+				(int) pd->config_val);
+
+			break;
+		case SENSOR_VANA:
+		case SENSOR_VDIG:
+		case SENSOR_VIO:
+		case SENSOR_VAF:
+		case SENSOR_VAF_PWDM:
+		case SENSOR_CUSTOM_REG1:
+		case SENSOR_CUSTOM_REG2:
+			/* INVALID_VREG marks a rail absent from DT: skip. */
+			if (pd->seq_val == INVALID_VREG)
+				break;
+
+			/* Only disable rails the power-up sequence enabled. */
+			ps = msm_camera_get_power_settings(
+				ctrl, pd->seq_type,
+				pd->seq_val);
+			if (ps) {
+				if (pd->seq_val < num_vreg) {
+					CAM_DBG(CAM_SENSOR,
+						"Disable Regulator");
+					ret =  cam_soc_util_regulator_disable(
+					soc_info->rgltr[ps->seq_val],
+					soc_info->rgltr_name[ps->seq_val],
+					soc_info->rgltr_min_volt[ps->seq_val],
+					soc_info->rgltr_max_volt[ps->seq_val],
+					soc_info->rgltr_op_mode[ps->seq_val],
+					soc_info->rgltr_delay[ps->seq_val]);
+					if (ret) {
+						CAM_ERR(CAM_SENSOR,
+						"Reg: %s disable failed",
+						soc_info->rgltr_name[
+							ps->seq_val]);
+						soc_info->rgltr[ps->seq_val] =
+							NULL;
+						msm_cam_sensor_handle_reg_gpio(
+							pd->seq_type,
+							gpio_num_info,
+							GPIOF_OUT_INIT_LOW);
+						continue;
+					}
+					ps->data[0] =
+						soc_info->rgltr[ps->seq_val];
+					regulator_put(
+						soc_info->rgltr[ps->seq_val]);
+					soc_info->rgltr[ps->seq_val] = NULL;
+				} else {
+					CAM_ERR(CAM_SENSOR,
+						"seq_val:%d > num_vreg: %d",
+						 pd->seq_val,
+						num_vreg);
+				}
+			} else
+				CAM_ERR(CAM_SENSOR,
+					"error in power up/down seq");
+
+			ret = msm_cam_sensor_handle_reg_gpio(pd->seq_type,
+				gpio_num_info, GPIOF_OUT_INIT_LOW);
+
+			if (ret < 0)
+				CAM_ERR(CAM_SENSOR,
+					"Error disabling VREG GPIO");
+			break;
+		default:
+			CAM_ERR(CAM_SENSOR, "error power seq type %d",
+				pd->seq_type);
+			break;
+		}
+		/* msleep for long delays, usleep_range for short ones. */
+		if (pd->delay > 20)
+			msleep(pd->delay);
+		else if (pd->delay)
+			usleep_range(pd->delay * 1000,
+				(pd->delay * 1000) + 1000);
+	}
+
+	if (ctrl->cam_pinctrl_status) {
+		ret = pinctrl_select_state(
+				ctrl->pinctrl_info.pinctrl,
+				ctrl->pinctrl_info.gpio_state_suspend);
+		if (ret)
+			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
+
+		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+	}
+
+	if (soc_info->use_shared_clk)
+		cam_res_mgr_shared_clk_config(false);
+
+	cam_res_mgr_shared_pinctrl_select_state(false);
+	cam_res_mgr_shared_pinctrl_put();
+
+	ctrl->cam_pinctrl_status = 0;
+
+	cam_sensor_util_request_gpio_table(soc_info, 0);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
new file mode 100644
index 000000000000..583ddb14243b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_UTIL_H_
+#define _CAM_SENSOR_UTIL_H_
+
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_req_mgr_util.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_mem_mgr.h>
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+#include "cam_sensor_io.h"
+
+/* Sentinel seq_val meaning "no matching regulator found" */
+#define INVALID_VREG 100
+
+/* Parse the power-up/power-down sequence tables from device tree */
+int cam_get_dt_power_setting_data(struct device_node *of_node,
+ struct cam_hw_soc_info *soc_info,
+ struct cam_sensor_power_ctrl_t *power_info);
+
+/* Look up pinctrl handle and active/suspend states for a sensor device */
+int msm_camera_pinctrl_init
+ (struct msm_pinctrl_info *sensor_pctrl, struct device *dev);
+
+/* Translate UMD command buffers into kernel i2c_settings_array entries */
+int cam_sensor_i2c_command_parser(struct camera_io_master *io_master,
+ struct i2c_settings_array *i2c_reg_settings,
+ struct cam_cmd_buf_desc *cmd_desc, int32_t num_cmd_buffers);
+
+/* Apply one parsed i2c setting list on the given io master */
+int cam_sensor_util_i2c_apply_setting(struct camera_io_master *io_master_info,
+ struct i2c_settings_list *i2c_list);
+
+/* Free all settings held by an i2c_settings_array */
+int32_t delete_request(struct i2c_settings_array *i2c_array);
+/* Request (gpio_en != 0) or free (gpio_en == 0) the sensor GPIO table */
+int cam_sensor_util_request_gpio_table(
+ struct cam_hw_soc_info *soc_info, int gpio_en);
+
+/* Build the seq_type -> gpio number lookup table from soc_info */
+int cam_sensor_util_init_gpio_pin_tbl(
+ struct cam_hw_soc_info *soc_info,
+ struct msm_camera_gpio_num_info **pgpio_num_info);
+/* Execute the power-up sequence (clocks, regulators, GPIOs, pinctrl) */
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
+ struct cam_hw_soc_info *soc_info);
+
+/* Execute the power-down sequence; inverse of cam_sensor_core_power_up */
+int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+ struct cam_hw_soc_info *soc_info);
+
+/* Match power settings against soc_info regulator names (seq_val fill-in) */
+int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info,
+ struct cam_sensor_power_setting *power_setting,
+ uint16_t power_setting_size);
+
+/* Parse a power-settings command buffer into power_info */
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+ int cmd_length, struct cam_sensor_power_ctrl_t *power_info);
+
+/* Toggle BoB regulator between PWM and auto mode (flag = PWM on/off) */
+int cam_sensor_bob_pwm_mode_switch(struct cam_hw_soc_info *soc_info,
+ int bob_reg_idx, bool flag);
+#endif /* _CAM_SENSOR_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_smmu/Makefile b/drivers/media/platform/msm/camera_v3/cam_smmu/Makefile
new file mode 100644
index 000000000000..0bae5e0e6496
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_smmu/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera_v3/cam_smmu/cam_smmu_api.c
new file mode 100644
index 000000000000..7ddfa3d2ec56
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_smmu/cam_smmu_api.c
@@ -0,0 +1,3523 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/workqueue.h>
+#include <linux/genalloc.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+#include <uapi/media/cam_req_mgr.h>
+#include "cam_smmu_api.h"
+#include "cam_debug_util.h"
+
+#define SHARED_MEM_POOL_GRANULARITY 12
+
+#define IOMMU_INVALID_DIR -1
+#define BYTE_SIZE 8
+#define COOKIE_NUM_BYTE 2
+#define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE)
+#define COOKIE_MASK ((1<<COOKIE_SIZE)-1)
+#define HANDLE_INIT (-1)
+#define CAM_SMMU_CB_MAX 5
+
+#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
+#define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
+
+static int g_num_pf_handled = 4;
+module_param(g_num_pf_handled, int, 0644);
+
+/* DMA-coherent buffer holding ICP firmware prior to IOMMU mapping */
+struct firmware_alloc_info {
+ struct device *fw_dev;
+ void *fw_kva;
+ dma_addr_t fw_dma_hdl;
+};
+
+struct firmware_alloc_info icp_fw;
+
+/* One deferred page-fault event, queued on iommu_cb_set.payload_list */
+struct cam_smmu_work_payload {
+ int idx;
+ struct iommu_domain *domain;
+ struct device *dev;
+ unsigned long iova;
+ int flags;
+ void *token;
+ struct list_head list;
+};
+
+enum cam_protection_type {
+ CAM_PROT_INVALID,
+ CAM_NON_SECURE,
+ CAM_SECURE,
+ CAM_PROT_MAX,
+};
+
+enum cam_iommu_type {
+ CAM_SMMU_INVALID,
+ CAM_QSMMU,
+ CAM_ARM_SMMU,
+ CAM_SMMU_MAX,
+};
+
+enum cam_smmu_buf_state {
+ CAM_SMMU_BUFF_EXIST,
+ CAM_SMMU_BUFF_NOT_EXIST,
+};
+
+/* Direction flag for cam_smmu_reset_iommu_table(): init vs teardown */
+enum cam_smmu_init_dir {
+ CAM_SMMU_TABLE_INIT,
+ CAM_SMMU_TABLE_DEINIT,
+};
+
+/* Simple bitmap allocator over a fixed IOVA window for scratch buffers */
+struct scratch_mapping {
+ void *bitmap;
+ size_t bits;
+ unsigned int order;
+ dma_addr_t base;
+};
+
+/* dma-buf attachment bookkeeping for the secondary heap region */
+struct secheap_buf_info {
+ struct dma_buf *buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+};
+
+/*
+ * Per-context-bank state: device + ARM IOMMU mapping, the DT-described
+ * IOVA regions (scratch/firmware/shared/io/secheap/qdss), buffer lists,
+ * the client handle, and up to CAM_SMMU_CB_MAX fault handlers.
+ */
+struct cam_context_bank_info {
+ struct device *dev;
+ struct dma_iommu_mapping *mapping;
+ dma_addr_t va_start;
+ size_t va_len;
+ const char *name;
+ bool is_secure;
+ uint8_t scratch_buf_support;
+ uint8_t firmware_support;
+ uint8_t shared_support;
+ uint8_t io_support;
+ uint8_t secheap_support;
+ uint8_t qdss_support;
+ dma_addr_t qdss_phy_addr;
+ bool is_fw_allocated;
+ bool is_secheap_allocated;
+ bool is_qdss_allocated;
+
+ struct scratch_mapping scratch_map;
+ struct gen_pool *shared_mem_pool;
+
+ struct cam_smmu_region_info scratch_info;
+ struct cam_smmu_region_info firmware_info;
+ struct cam_smmu_region_info shared_info;
+ struct cam_smmu_region_info io_info;
+ struct cam_smmu_region_info secheap_info;
+ struct cam_smmu_region_info qdss_info;
+ struct secheap_buf_info secheap_buf;
+
+ struct list_head smmu_buf_list;
+ struct list_head smmu_buf_kernel_list;
+ struct mutex lock;
+ int handle;
+ enum cam_smmu_ops_param state;
+
+ cam_smmu_client_page_fault_handler handler[CAM_SMMU_CB_MAX];
+ void *token[CAM_SMMU_CB_MAX];
+ int cb_count;
+ int secure_count;
+ int pf_count;
+};
+
+/* Top-level driver state: array of context banks + fault work queueing */
+struct cam_iommu_cb_set {
+ struct cam_context_bank_info *cb_info;
+ u32 cb_num;
+ u32 cb_init_count;
+ struct work_struct smmu_work;
+ struct mutex payload_list_lock;
+ struct list_head payload_list;
+ u32 non_fatal_fault;
+ u32 enable_iova_guard;
+};
+
+static const struct of_device_id msm_cam_smmu_dt_match[] = {
+ { .compatible = "qcom,msm-cam-smmu", },
+ { .compatible = "qcom,msm-cam-smmu-cb", },
+ { .compatible = "qcom,msm-cam-smmu-fw-dev", },
+ {}
+};
+
+/* One user-space (ion fd) mapping on a context bank's smmu_buf_list */
+struct cam_dma_buff_info {
+ struct dma_buf *buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ enum dma_data_direction dir;
+ enum cam_smmu_region_id region_id;
+ int iommu_dir;
+ int ref_count;
+ dma_addr_t paddr;
+ struct list_head list;
+ int ion_fd;
+ size_t len;
+ size_t phys_len;
+};
+
+/* Secure-camera buffer mapping (stage-2 mapped; no local IOMMU entry) */
+struct cam_sec_buff_info {
+ struct ion_handle *i_hdl;
+ struct ion_client *i_client;
+ enum dma_data_direction dir;
+ int ref_count;
+ dma_addr_t paddr;
+ struct list_head list;
+ int ion_fd;
+ size_t len;
+};
+
+static const char *qdss_region_name = "qdss";
+
+static struct cam_iommu_cb_set iommu_cb_set;
+
+static enum dma_data_direction cam_smmu_translate_dir(
+ enum cam_smmu_map_dir dir);
+
+static int cam_smmu_check_handle_unique(int hdl);
+
+static int cam_smmu_create_iommu_handle(int idx);
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+ int *hdl);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+ int ion_fd);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx,
+ struct dma_buf *buf);
+
+static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
+ int ion_fd);
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+ dma_addr_t base, size_t size,
+ int order);
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+ size_t size,
+ dma_addr_t *iova);
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+ dma_addr_t addr, size_t size);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+ dma_addr_t virt_addr);
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+ enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr, enum cam_smmu_region_id region_id);
+
+static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
+ struct dma_buf *buf, enum dma_data_direction dma_dir,
+ dma_addr_t *paddr_ptr, size_t *len_ptr,
+ enum cam_smmu_region_id region_id);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+ size_t virt_len,
+ size_t phys_len,
+ unsigned int iommu_dir,
+ dma_addr_t *virt_addr);
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+ struct cam_dma_buff_info *mapping_info, int idx);
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+ struct cam_dma_buff_info *mapping_info,
+ int idx);
+
+static void cam_smmu_clean_user_buffer_list(int idx);
+
+static void cam_smmu_clean_kernel_buffer_list(int idx);
+
+static void cam_smmu_print_user_list(int idx);
+
+static void cam_smmu_print_kernel_list(int idx);
+
+static void cam_smmu_print_table(void);
+
+static int cam_smmu_probe(struct platform_device *pdev);
+
+static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr);
+
+/*
+ * Drain one queued page-fault payload and fan it out to every registered
+ * client handler on that context bank, tagging it with the mem-mgr handle
+ * of the mapping closest to the faulting IOVA (0 if none found).
+ */
+static void cam_smmu_page_fault_work(struct work_struct *work)
+{
+ int j;
+ int idx;
+ struct cam_smmu_work_payload *payload;
+ uint32_t buf_info;
+
+ mutex_lock(&iommu_cb_set.payload_list_lock);
+ if (list_empty(&iommu_cb_set.payload_list)) {
+ CAM_ERR(CAM_SMMU, "Payload list empty");
+ mutex_unlock(&iommu_cb_set.payload_list_lock);
+ return;
+ }
+
+ /* Only one payload consumed per invocation (FIFO order) */
+ payload = list_first_entry(&iommu_cb_set.payload_list,
+ struct cam_smmu_work_payload,
+ list);
+ list_del(&payload->list);
+ mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+ /* Dereference the payload to call the handler */
+ idx = payload->idx;
+ buf_info = cam_smmu_find_closest_mapping(idx, (void *)payload->iova);
+ if (buf_info != 0)
+ CAM_INFO(CAM_SMMU, "closest buf 0x%x idx %d", buf_info, idx);
+
+ for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+ if ((iommu_cb_set.cb_info[idx].handler[j])) {
+ iommu_cb_set.cb_info[idx].handler[j](
+ payload->domain,
+ payload->dev,
+ payload->iova,
+ payload->flags,
+ iommu_cb_set.cb_info[idx].token[j],
+ buf_info);
+ }
+ }
+ kfree(payload);
+}
+
+/* Debug dump of all user-space (ion fd) mappings on context bank idx */
+static void cam_smmu_print_user_list(int idx)
+{
+ struct cam_dma_buff_info *mapping;
+
+ CAM_ERR(CAM_SMMU, "index = %d", idx);
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ CAM_ERR(CAM_SMMU,
+ "ion_fd = %d, paddr= 0x%pK, len = %u, region = %d",
+ mapping->ion_fd, (void *)mapping->paddr,
+ (unsigned int)mapping->len,
+ mapping->region_id);
+ }
+}
+
+/* Debug dump of all kernel (dma_buf) mappings on context bank idx */
+static void cam_smmu_print_kernel_list(int idx)
+{
+ struct cam_dma_buff_info *mapping;
+
+ CAM_ERR(CAM_SMMU, "index = %d", idx);
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+ CAM_ERR(CAM_SMMU,
+ "dma_buf = %pK, paddr= 0x%pK, len = %u, region = %d",
+ mapping->buf, (void *)mapping->paddr,
+ (unsigned int)mapping->len,
+ mapping->region_id);
+ }
+}
+
+/* Debug dump of every context bank's handle/name/device pointer */
+static void cam_smmu_print_table(void)
+{
+ int i;
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ CAM_ERR(CAM_SMMU, "i= %d, handle= %d, name_addr=%pK", i,
+ (int)iommu_cb_set.cb_info[i].handle,
+ (void *)iommu_cb_set.cb_info[i].name);
+ CAM_ERR(CAM_SMMU, "dev = %pK", iommu_cb_set.cb_info[i].dev);
+ }
+}
+
+/*
+ * Locate the mapping on context bank idx that contains (or lies closest
+ * to) the faulting address vaddr, and return its mem-mgr buffer handle
+ * (0 if the bank has no mappings at all). Used purely for page-fault
+ * diagnostics.
+ *
+ * Fix: the final log line previously read "mapping->len". When the loop
+ * finishes without a break, "mapping" is the bogus container_of() of the
+ * list head (and even on the nearest-match path it is the *last* entry,
+ * not the chosen one), so the length logged was garbage / an invalid
+ * dereference. It must be closest_mapping->len.
+ */
+static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr)
+{
+ struct cam_dma_buff_info *mapping, *closest_mapping = NULL;
+ unsigned long start_addr, end_addr, current_addr;
+ uint32_t buf_handle = 0;
+
+ long delta = 0, lowest_delta = 0;
+
+ current_addr = (unsigned long)vaddr;
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ start_addr = (unsigned long)mapping->paddr;
+ end_addr = (unsigned long)mapping->paddr + mapping->len;
+
+ if (start_addr <= current_addr && current_addr <= end_addr) {
+ closest_mapping = mapping;
+ CAM_INFO(CAM_SMMU,
+ "Found va 0x%lx in:0x%lx-0x%lx, fd %d cb:%s",
+ current_addr, start_addr,
+ end_addr, mapping->ion_fd,
+ iommu_cb_set.cb_info[idx].name);
+ goto end;
+ } else {
+ /* Distance to nearest edge of this buffer */
+ if (start_addr > current_addr)
+ delta = start_addr - current_addr;
+ else
+ delta = current_addr - end_addr - 1;
+
+ /* lowest_delta == 0 means "no candidate yet" */
+ if (delta < lowest_delta || lowest_delta == 0) {
+ lowest_delta = delta;
+ closest_mapping = mapping;
+ }
+ CAM_DBG(CAM_SMMU,
+ "approx va %lx not in range: %lx-%lx fd = %0x",
+ current_addr, start_addr,
+ end_addr, mapping->ion_fd);
+ }
+ }
+
+end:
+ if (closest_mapping) {
+ buf_handle = GET_MEM_HANDLE(idx, closest_mapping->ion_fd);
+ CAM_INFO(CAM_SMMU,
+ "Closest map fd %d 0x%lx 0x%lx-0x%lx buf=%pK mem %0x",
+ closest_mapping->ion_fd, current_addr,
+ (unsigned long)closest_mapping->paddr,
+ (unsigned long)closest_mapping->paddr +
+ closest_mapping->len,
+ closest_mapping->buf,
+ buf_handle);
+ } else
+ CAM_INFO(CAM_SMMU,
+ "Cannot find vaddr:%lx in SMMU %s virt address",
+ current_addr, iommu_cb_set.cb_info[idx].name);
+
+ return buf_handle;
+}
+
+/*
+ * Register (handler_cb != NULL) or deregister (handler_cb == NULL) a
+ * client page-fault callback for the context bank addressed by handle.
+ * token identifies the client; at most CAM_SMMU_CB_MAX handlers per bank.
+ */
+void cam_smmu_set_client_page_fault_handler(int handle,
+ cam_smmu_client_page_fault_handler handler_cb, void *token)
+{
+ int idx, i = 0;
+
+ if (!token || (handle == HANDLE_INIT)) {
+ CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle");
+ return;
+ }
+
+ /* Upper bytes of the handle encode the table index */
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
+ idx, handle);
+ return;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+ }
+
+ if (handler_cb) {
+ if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
+ CAM_ERR(CAM_SMMU,
+ "%s Should not regiester more handlers",
+ iommu_cb_set.cb_info[idx].name);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+ }
+
+ iommu_cb_set.cb_info[idx].cb_count++;
+
+ /* Fill the first free slot (token == NULL) */
+ for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) {
+ if (iommu_cb_set.cb_info[idx].token[i] == NULL) {
+ iommu_cb_set.cb_info[idx].token[i] = token;
+ iommu_cb_set.cb_info[idx].handler[i] =
+ handler_cb;
+ break;
+ }
+ }
+ } else {
+ /* Deregistration path: clear the slot matching token */
+ for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+ if (iommu_cb_set.cb_info[idx].token[i] == token) {
+ iommu_cb_set.cb_info[idx].token[i] = NULL;
+ iommu_cb_set.cb_info[idx].handler[i] =
+ NULL;
+ iommu_cb_set.cb_info[idx].cb_count--;
+ break;
+ }
+ }
+ if (i == CAM_SMMU_CB_MAX)
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl %x no matching tokens: %s",
+ handle, iommu_cb_set.cb_info[idx].name);
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+}
+
+/*
+ * Remove the page-fault callback registered with the given token on the
+ * context bank addressed by handle. No-op (with error log) if the token
+ * has no registered slot.
+ */
+void cam_smmu_unset_client_page_fault_handler(int handle, void *token)
+{
+ int idx, i = 0;
+
+ if (!token || (handle == HANDLE_INIT)) {
+ CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle");
+ return;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
+ idx, handle);
+ return;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+ }
+
+ for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+ if (iommu_cb_set.cb_info[idx].token[i] == token) {
+ iommu_cb_set.cb_info[idx].token[i] = NULL;
+ iommu_cb_set.cb_info[idx].handler[i] =
+ NULL;
+ iommu_cb_set.cb_info[idx].cb_count--;
+ break;
+ }
+ }
+ if (i == CAM_SMMU_CB_MAX)
+ CAM_ERR(CAM_SMMU, "Error: hdl %x no matching tokens: %s",
+ handle, iommu_cb_set.cb_info[idx].name);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+}
+
+/*
+ * IOMMU fault callback installed per context bank (token is the bank
+ * name string). Queues a payload and invokes the fault-dispatch routine.
+ * Always returns -EINVAL so the IOMMU core treats the fault as unhandled.
+ *
+ * NOTE(review): this takes payload_list_lock (a mutex) and then calls
+ * cam_smmu_page_fault_work() synchronously rather than scheduling
+ * iommu_cb_set.smmu_work — confirm the fault callback can never run in
+ * atomic context on this platform (kzalloc here uses GFP_ATOMIC, which
+ * suggests the original author was not certain either).
+ */
+static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova,
+ int flags, void *token)
+{
+ char *cb_name;
+ int idx;
+ struct cam_smmu_work_payload *payload;
+
+ if (!token) {
+ CAM_ERR(CAM_SMMU, "Error: token is NULL");
+ CAM_ERR(CAM_SMMU, "Error: domain = %pK, device = %pK",
+ domain, dev);
+ CAM_ERR(CAM_SMMU, "iova = %lX, flags = %d", iova, flags);
+ return -EINVAL;
+ }
+
+ cb_name = (char *)token;
+ /* Check whether it is in the table */
+ for (idx = 0; idx < iommu_cb_set.cb_num; idx++) {
+ if (!strcmp(iommu_cb_set.cb_info[idx].name, cb_name))
+ break;
+ }
+
+ /* idx == cb_num here means the name was not found */
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ CAM_ERR(CAM_SMMU,
+ "Error: index is not valid, index = %d, token = %s",
+ idx, cb_name);
+ return -EINVAL;
+ }
+
+ /* Rate-limit: stop dispatching after g_num_pf_handled faults */
+ if (++iommu_cb_set.cb_info[idx].pf_count > g_num_pf_handled) {
+ CAM_INFO(CAM_SMMU, "PF already handled %d %d %d",
+ g_num_pf_handled, idx,
+ iommu_cb_set.cb_info[idx].pf_count);
+ return -EINVAL;
+ }
+
+ payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
+ if (!payload)
+ return -EINVAL;
+
+ payload->domain = domain;
+ payload->dev = dev;
+ payload->iova = iova;
+ payload->flags = flags;
+ payload->token = token;
+ payload->idx = idx;
+
+ mutex_lock(&iommu_cb_set.payload_list_lock);
+ list_add_tail(&payload->list, &iommu_cb_set.payload_list);
+ mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+ cam_smmu_page_fault_work(&iommu_cb_set.smmu_work);
+
+ return -EINVAL;
+}
+
+/*
+ * Map the CAM_SMMU_MAP_* direction enum onto IOMMU_* protection flags;
+ * returns IOMMU_INVALID_DIR (-1) for unknown directions.
+ */
+static int cam_smmu_translate_dir_to_iommu_dir(
+ enum cam_smmu_map_dir dir)
+{
+ switch (dir) {
+ case CAM_SMMU_MAP_READ:
+ return IOMMU_READ;
+ case CAM_SMMU_MAP_WRITE:
+ return IOMMU_WRITE;
+ case CAM_SMMU_MAP_RW:
+ return IOMMU_READ|IOMMU_WRITE;
+ case CAM_SMMU_MAP_INVALID:
+ default:
+ CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d", dir);
+ break;
+ };
+ return IOMMU_INVALID_DIR;
+}
+
+/*
+ * Map the CAM_SMMU_MAP_* direction enum onto dma_data_direction
+ * (READ = device-to-cpu = DMA_FROM_DEVICE); DMA_NONE on bad input.
+ */
+static enum dma_data_direction cam_smmu_translate_dir(
+ enum cam_smmu_map_dir dir)
+{
+ switch (dir) {
+ case CAM_SMMU_MAP_READ:
+ return DMA_FROM_DEVICE;
+ case CAM_SMMU_MAP_WRITE:
+ return DMA_TO_DEVICE;
+ case CAM_SMMU_MAP_RW:
+ return DMA_BIDIRECTIONAL;
+ case CAM_SMMU_MAP_INVALID:
+ default:
+ CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d",
+ (int)dir);
+ break;
+ }
+ return DMA_NONE;
+}
+
+/*
+ * Reset every context bank to its pristine state (no handle, empty
+ * buffer lists, detached, no fault handlers). ops selects whether the
+ * per-bank mutex is being created (TABLE_INIT) or destroyed.
+ */
+void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
+{
+ unsigned int i;
+ int j = 0;
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ iommu_cb_set.cb_info[i].handle = HANDLE_INIT;
+ INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list);
+ INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_kernel_list);
+ iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
+ iommu_cb_set.cb_info[i].dev = NULL;
+ iommu_cb_set.cb_info[i].cb_count = 0;
+ iommu_cb_set.cb_info[i].pf_count = 0;
+ for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+ iommu_cb_set.cb_info[i].token[j] = NULL;
+ iommu_cb_set.cb_info[i].handler[j] = NULL;
+ }
+ if (ops == CAM_SMMU_TABLE_INIT)
+ mutex_init(&iommu_cb_set.cb_info[i].lock);
+ else
+ mutex_destroy(&iommu_cb_set.cb_info[i].lock);
+ }
+}
+
+/*
+ * Return 1 if hdl collides with HANDLE_INIT or with any handle already
+ * assigned to a context bank, 0 if it is free to use.
+ */
+static int cam_smmu_check_handle_unique(int hdl)
+{
+ int i;
+
+ if (hdl == HANDLE_INIT) {
+ CAM_DBG(CAM_SMMU,
+ "iommu handle is init number. Need to try again");
+ return 1;
+ }
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT)
+ continue;
+
+ if (iommu_cb_set.cb_info[i].handle == hdl) {
+ CAM_DBG(CAM_SMMU, "iommu handle %d conflicts",
+ (int)hdl);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * use low 2 bytes for handle cookie
+ *
+ * Build a handle as (idx << 16) | random_cookie so clients can be told
+ * apart from stale handles; uniqueness is enforced by the caller via
+ * cam_smmu_check_handle_unique().
+ */
+static int cam_smmu_create_iommu_handle(int idx)
+{
+ int rand, hdl = 0;
+
+ get_random_bytes(&rand, COOKIE_NUM_BYTE);
+ hdl = GET_SMMU_HDL(idx, rand);
+ CAM_DBG(CAM_SMMU, "create handle value = %x", (int)hdl);
+ return hdl;
+}
+
+/*
+ * Attach context bank idx's ARM IOMMU mapping to its device.
+ * Returns 0 on success, -ENODEV on any attach failure.
+ */
+static int cam_smmu_attach_device(int idx)
+{
+ int rc;
+ struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+ /* attach the mapping to device */
+ rc = arm_iommu_attach_device(cb->dev, cb->mapping);
+ if (rc < 0) {
+ CAM_ERR(CAM_SMMU, "Error: ARM IOMMU attach failed. ret = %d",
+ rc);
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+/*
+ * Find the context bank matching name and install a fresh unique handle
+ * into it, returned via *hdl. A second caller on a secure bank shares
+ * the existing handle (refcounted via secure_count); a second caller on
+ * a non-secure bank is an error (-EINVAL).
+ */
+static int cam_smmu_create_add_handle_in_table(char *name,
+ int *hdl)
+{
+ int i;
+ int handle;
+
+ /* create handle and add in the iommu hardware table */
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
+ mutex_lock(&iommu_cb_set.cb_info[i].lock);
+ if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
+ CAM_ERR(CAM_SMMU,
+ "Error: %s already got handle 0x%x",
+ name,
+ iommu_cb_set.cb_info[i].handle);
+
+ if (iommu_cb_set.cb_info[i].is_secure)
+ iommu_cb_set.cb_info[i].secure_count++;
+
+ mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+ /* Secure banks are shared: hand back same hdl */
+ if (iommu_cb_set.cb_info[i].is_secure) {
+ *hdl = iommu_cb_set.cb_info[i].handle;
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ /* make sure handle is unique */
+ do {
+ handle = cam_smmu_create_iommu_handle(i);
+ } while (cam_smmu_check_handle_unique(handle));
+
+ /* put handle in the table */
+ iommu_cb_set.cb_info[i].handle = handle;
+ iommu_cb_set.cb_info[i].cb_count = 0;
+ if (iommu_cb_set.cb_info[i].is_secure)
+ iommu_cb_set.cb_info[i].secure_count++;
+ *hdl = handle;
+ CAM_DBG(CAM_SMMU, "%s creates handle 0x%x",
+ name, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+ return 0;
+ }
+ }
+
+ CAM_ERR(CAM_SMMU, "Error: Cannot find name %s or all handle exist",
+ name);
+ cam_smmu_print_table();
+ return -EINVAL;
+}
+
+/*
+ * Initialise the scratch-region bitmap allocator: one bit per
+ * (PAGE_SIZE << order) chunk over [base, base + size).
+ * Returns 0, -EINVAL for a zero-chunk size, or -ENOMEM.
+ */
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+ dma_addr_t base, size_t size,
+ int order)
+{
+ unsigned int count = size >> (PAGE_SHIFT + order);
+ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ int err = 0;
+
+ if (!count) {
+ err = -EINVAL;
+ CAM_ERR(CAM_SMMU, "Page count is zero, size passed = %zu",
+ size);
+ goto bail;
+ }
+
+ scratch_map->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!scratch_map->bitmap) {
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ scratch_map->base = base;
+ /* bits is rounded up to the long-aligned bitmap size */
+ scratch_map->bits = BITS_PER_BYTE * bitmap_size;
+ scratch_map->order = order;
+
+bail:
+ return err;
+}
+
+/*
+ * Carve an IOVA of at least "size" bytes (plus one guard page) out of
+ * the scratch bitmap; result returned in *iova. -ENOMEM if the bitmap
+ * has no run of free chunks large enough.
+ *
+ * NOTE(review): bitmap_find_next_zero_area() returns a value >= bits on
+ * failure; the check "start > mapping->bits" would let start == bits
+ * through — confirm whether this should be ">=".
+ */
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+ size_t size,
+ dma_addr_t *iova)
+{
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ /*
+ * Transparently, add a guard page to the total count of pages
+ * to be allocated
+ */
+ count++;
+
+ if (order > mapping->order)
+ align = (1 << (order - mapping->order)) - 1;
+
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+
+ if (start > mapping->bits)
+ return -ENOMEM;
+
+ bitmap_set(mapping->bitmap, start, count);
+ *iova = mapping->base + (start << (mapping->order + PAGE_SHIFT));
+
+ return 0;
+}
+
+/*
+ * Return a scratch IOVA range (plus its guard page) to the bitmap.
+ * -EINVAL for a NULL address or a range outside the bitmap.
+ */
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >>
+ (mapping->order + PAGE_SHIFT);
+ unsigned int count = ((size >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ if (!addr) {
+ CAM_ERR(CAM_SMMU, "Error: Invalid address");
+ return -EINVAL;
+ }
+
+ if (start + count > mapping->bits) {
+ CAM_ERR(CAM_SMMU, "Error: Invalid page bits in scratch map");
+ return -EINVAL;
+ }
+
+ /*
+ * Transparently, add a guard page to the total count of pages
+ * to be freed
+ */
+ count++;
+ bitmap_clear(mapping->bitmap, start, count);
+
+ return 0;
+}
+
+/*
+ * Linear search of bank idx's user buffer list for a mapping whose
+ * device address equals virt_addr exactly; NULL (with error log) if
+ * absent. Caller must hold the bank lock.
+ */
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+ dma_addr_t virt_addr)
+{
+ struct cam_dma_buff_info *mapping;
+
+ list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->paddr == virt_addr) {
+ CAM_DBG(CAM_SMMU, "Found virtual address %lx",
+ (unsigned long)virt_addr);
+ return mapping;
+ }
+ }
+
+ CAM_ERR(CAM_SMMU, "Error: Cannot find virtual address %lx by index %d",
+ (unsigned long)virt_addr, idx);
+ return NULL;
+}
+
+/*
+ * Linear search of bank idx's user buffer list by ion fd; NULL (with
+ * error log) if absent or fd is negative. Caller must hold the bank lock.
+ */
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+ int ion_fd)
+{
+ struct cam_dma_buff_info *mapping;
+
+ if (ion_fd < 0) {
+ CAM_ERR(CAM_SMMU, "Invalid fd %d", ion_fd);
+ return NULL;
+ }
+
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->ion_fd == ion_fd) {
+ CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
+ return mapping;
+ }
+ }
+
+ CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d", idx);
+
+ return NULL;
+}
+
+/*
+ * Linear search of bank idx's *kernel* buffer list by dma_buf pointer;
+ * NULL (with error log) if absent. Caller must hold the bank lock.
+ */
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx,
+ struct dma_buf *buf)
+{
+ struct cam_dma_buff_info *mapping;
+
+ if (!buf) {
+ CAM_ERR(CAM_SMMU, "Invalid dma_buf");
+ return NULL;
+ }
+
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list,
+ list) {
+ if (mapping->buf == buf) {
+ CAM_DBG(CAM_SMMU, "find dma_buf %pK", buf);
+ return mapping;
+ }
+ }
+
+ CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d", idx);
+
+ return NULL;
+}
+
+/*
+ * Linear search by ion fd for a secure-buffer entry on bank idx's list.
+ * NOTE(review): this walks smmu_buf_list but casts entries to
+ * cam_sec_buff_info — confirm secure banks only ever hold
+ * cam_sec_buff_info nodes on that list.
+ */
+static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
+ int ion_fd)
+{
+ struct cam_sec_buff_info *mapping;
+
+ list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->ion_fd == ion_fd) {
+ CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
+ return mapping;
+ }
+ }
+ CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
+ ion_fd, idx);
+ return NULL;
+}
+
+/*
+ * Tear down every user-space mapping on bank idx, best-effort: unmap
+ * failures are logged and skipped so the rest of the list still drains.
+ * Scratch entries are tagged with the sentinel fd 0xDEADBEEF.
+ */
+static void cam_smmu_clean_user_buffer_list(int idx)
+{
+ int ret;
+ struct cam_dma_buff_info *mapping_info, *temp;
+
+ list_for_each_entry_safe(mapping_info, temp,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ CAM_DBG(CAM_SMMU, "Free mapping address %pK, i = %d, fd = %d",
+ (void *)mapping_info->paddr, idx,
+ mapping_info->ion_fd);
+
+ if (mapping_info->ion_fd == 0xDEADBEEF)
+ /* Clean up scratch buffers */
+ ret = cam_smmu_free_scratch_buffer_remove_from_list(
+ mapping_info, idx);
+ else
+ /* Clean up regular mapped buffers */
+ ret = cam_smmu_unmap_buf_and_remove_from_list(
+ mapping_info,
+ idx);
+
+ if (ret < 0) {
+ CAM_ERR(CAM_SMMU, "Buffer delete failed: idx = %d",
+ idx);
+ CAM_ERR(CAM_SMMU,
+ "Buffer delete failed: addr = %lx, fd = %d",
+ (unsigned long)mapping_info->paddr,
+ mapping_info->ion_fd);
+ /*
+ * Ignore this error and continue to delete other
+ * buffers in the list
+ */
+ continue;
+ }
+ }
+}
+
+/*
+ * Tear down every kernel (dma_buf) mapping on bank idx, best-effort:
+ * unmap failures are logged and skipped so the list still drains.
+ */
+static void cam_smmu_clean_kernel_buffer_list(int idx)
+{
+ int ret;
+ struct cam_dma_buff_info *mapping_info, *temp;
+
+ list_for_each_entry_safe(mapping_info, temp,
+ &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+ CAM_DBG(CAM_SMMU,
+ "Free mapping address %pK, i = %d, dma_buf = %pK",
+ (void *)mapping_info->paddr, idx,
+ mapping_info->buf);
+
+ /* Clean up regular mapped buffers */
+ ret = cam_smmu_unmap_buf_and_remove_from_list(
+ mapping_info,
+ idx);
+
+ if (ret < 0) {
+ CAM_ERR(CAM_SMMU,
+ "Buffer delete in kernel list failed: idx = %d",
+ idx);
+ CAM_ERR(CAM_SMMU,
+ "Buffer delete failed: addr = %lx, dma_buf = %pK",
+ (unsigned long)mapping_info->paddr,
+ mapping_info->buf);
+ /*
+ * Ignore this error and continue to delete other
+ * buffers in the list
+ */
+ continue;
+ }
+ }
+}
+
+/*
+ * State-machine wrapper around cam_smmu_attach_device(): -EALREADY when
+ * already attached, -ENODEV on attach failure, -EINVAL for any other
+ * (unexpected) state.
+ */
+static int cam_smmu_attach(int idx)
+{
+ int ret;
+
+ if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+ ret = -EALREADY;
+ } else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+ ret = cam_smmu_attach_device(idx);
+ if (ret < 0) {
+ CAM_ERR(CAM_SMMU, "Error: ATTACH fail");
+ return -ENODEV;
+ }
+ iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
+ ret = 0;
+ } else {
+ CAM_ERR(CAM_SMMU, "Error: Not detach/attach: %d",
+ iommu_cb_set.cb_info[idx].state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * Detach bank idx's device from its IOMMU mapping if currently attached;
+ * -EALREADY when it was already detached, else 0.
+ */
+static int cam_smmu_detach_device(int idx)
+{
+ int rc = 0;
+ struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+ /* detach the mapping to device if not already detached */
+ if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+ rc = -EALREADY;
+ } else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+ arm_iommu_detach_device(cb->dev);
+ iommu_cb_set.cb_info[idx].state = CAM_SMMU_DETACH;
+ }
+
+ return rc;
+}
+
+/*
+ * Allocate "size" bytes of IOVA from the handle's shared-region genpool
+ * and return it via *iova. Validates the handle/index and that the bank
+ * advertises shared_support.
+ *
+ * NOTE(review): gen_pool_alloc() returns unsigned long but the result is
+ * stored in a uint32_t — confirm the shared region always fits in 32-bit
+ * device addresses on this target.
+ */
+static int cam_smmu_alloc_iova(size_t size,
+ int32_t smmu_hdl, uint32_t *iova)
+{
+ int rc = 0;
+ int idx;
+ uint32_t vaddr = 0;
+
+ if (!iova || !size || (smmu_hdl == HANDLE_INIT)) {
+ CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_SMMU, "Allocating iova size = %zu for smmu hdl=%X",
+ size, smmu_hdl);
+
+ idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
+ idx, smmu_hdl);
+ return -EINVAL;
+ }
+
+ if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
+ iommu_cb_set.cb_info[idx].handle, smmu_hdl);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].shared_support) {
+ CAM_ERR(CAM_SMMU,
+ "Error: Shared memory not supported for hdl = %X",
+ smmu_hdl);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ vaddr = gen_pool_alloc(iommu_cb_set.cb_info[idx].shared_mem_pool, size);
+ if (!vaddr)
+ return -ENOMEM;
+
+ *iova = vaddr;
+
+get_addr_end:
+ return rc;
+}
+
+/*
+ * Return a shared-region IOVA range previously obtained from
+ * cam_smmu_alloc_iova() back to the handle's genpool.
+ */
+static int cam_smmu_free_iova(uint32_t addr, size_t size,
+ int32_t smmu_hdl)
+{
+ int rc = 0;
+ int idx;
+
+ if (!size || (smmu_hdl == HANDLE_INIT)) {
+ CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
+ idx, smmu_hdl);
+ return -EINVAL;
+ }
+
+ if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
+ iommu_cb_set.cb_info[idx].handle, smmu_hdl);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ gen_pool_free(iommu_cb_set.cb_info[idx].shared_mem_pool, addr, size);
+
+get_addr_end:
+ return rc;
+}
+
+/*
+ * Allocate the ICP firmware region: DMA-coherent memory from the
+ * dedicated fw device, then an explicit iommu_map() of it at the
+ * DT-reserved firmware IOVA. Outputs: *iova (device addr), *cpuva
+ * (kernel addr), *len (region length from DT). Single allocation only
+ * (is_fw_allocated guards double alloc); errors roll back the DMA
+ * allocation via the goto chain.
+ */
+int cam_smmu_alloc_firmware(int32_t smmu_hdl,
+ dma_addr_t *iova,
+ uintptr_t *cpuva,
+ size_t *len)
+{
+ int rc;
+ int32_t idx;
+ size_t firmware_len = 0;
+ size_t firmware_start = 0;
+ struct iommu_domain *domain;
+
+ if (!iova || !len || !cpuva || (smmu_hdl == HANDLE_INIT)) {
+ CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
+ idx, smmu_hdl);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].firmware_support) {
+ CAM_ERR(CAM_SMMU,
+ "Firmware memory not supported for this SMMU handle");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].is_fw_allocated) {
+ CAM_ERR(CAM_SMMU, "Trying to allocate twice");
+ rc = -ENOMEM;
+ goto unlock_and_end;
+ }
+
+ firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
+ firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+ CAM_DBG(CAM_SMMU, "Firmware area len from DT = %zu", firmware_len);
+
+ icp_fw.fw_kva = dma_alloc_coherent(icp_fw.fw_dev,
+ firmware_len,
+ &icp_fw.fw_dma_hdl,
+ GFP_KERNEL);
+ if (!icp_fw.fw_kva) {
+ CAM_ERR(CAM_SMMU, "FW memory alloc failed");
+ rc = -ENOMEM;
+ goto unlock_and_end;
+ } else {
+ CAM_DBG(CAM_SMMU, "DMA alloc returned fw = %pK, hdl = %pK",
+ icp_fw.fw_kva, (void *)icp_fw.fw_dma_hdl);
+ }
+
+ /* Pin the DMA buffer at the fixed firmware IOVA (privileged RW) */
+ domain = iommu_cb_set.cb_info[idx].mapping->domain;
+ rc = iommu_map(domain,
+ firmware_start,
+ icp_fw.fw_dma_hdl,
+ firmware_len,
+ IOMMU_READ|IOMMU_WRITE|IOMMU_PRIV);
+
+ if (rc) {
+ CAM_ERR(CAM_SMMU, "Failed to map FW into IOMMU");
+ rc = -ENOMEM;
+ goto alloc_fail;
+ }
+ iommu_cb_set.cb_info[idx].is_fw_allocated = true;
+
+ *iova = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+ *cpuva = (uintptr_t)icp_fw.fw_kva;
+ *len = firmware_len;
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+ return rc;
+
+alloc_fail:
+ dma_free_coherent(icp_fw.fw_dev,
+ firmware_len,
+ icp_fw.fw_kva,
+ icp_fw.fw_dma_hdl);
+unlock_and_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_alloc_firmware);
+
+/*
+ * cam_smmu_dealloc_firmware() - Unmap the firmware IOVA region and release
+ * the coherent DMA allocation made by cam_smmu_alloc_firmware().
+ * @smmu_hdl: SMMU handle identifying the context bank
+ *
+ * Returns 0 on success or a negative errno. A partial unmap is reported
+ * with -EINVAL but the DMA memory is still freed and state cleared.
+ */
+int cam_smmu_dealloc_firmware(int32_t smmu_hdl)
+{
+	int rc = 0;
+	int32_t idx;
+	size_t firmware_len = 0;
+	size_t firmware_start = 0;
+	struct iommu_domain *domain;
+	size_t unmapped = 0;
+
+	if (smmu_hdl == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].firmware_support) {
+		CAM_ERR(CAM_SMMU,
+			"Firmware memory not supported for this SMMU handle");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	/* Nothing to free unless a prior alloc succeeded */
+	if (!iommu_cb_set.cb_info[idx].is_fw_allocated) {
+		CAM_ERR(CAM_SMMU,
+			"Trying to deallocate firmware that is not allocated");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
+	firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	unmapped = iommu_unmap(domain,
+		firmware_start,
+		firmware_len);
+
+	if (unmapped != firmware_len) {
+		/* Report the short unmap but continue tearing down */
+		CAM_ERR(CAM_SMMU, "Only %zu unmapped out of total %zu",
+			unmapped,
+			firmware_len);
+		rc = -EINVAL;
+	}
+
+	dma_free_coherent(icp_fw.fw_dev,
+		firmware_len,
+		icp_fw.fw_kva,
+		icp_fw.fw_dma_hdl);
+
+	icp_fw.fw_kva = 0;
+	icp_fw.fw_dma_hdl = 0;
+
+	iommu_cb_set.cb_info[idx].is_fw_allocated = false;
+
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_dealloc_firmware);
+
+/*
+ * cam_smmu_alloc_qdss() - Map the (pre-existing) QDSS physical region at
+ * the fixed QDSS IOVA described in the device tree.
+ * @smmu_hdl: SMMU handle identifying the context bank
+ * @iova:     out: start IOVA of the QDSS region
+ * @len:      out: length of the QDSS region
+ *
+ * Unlike the firmware path there is no DMA allocation here: the physical
+ * address (qdss_phy_addr) is fixed and only an IOMMU mapping is created.
+ * Returns 0 on success or a negative errno.
+ */
+int cam_smmu_alloc_qdss(int32_t smmu_hdl,
+	dma_addr_t *iova,
+	size_t *len)
+{
+	int rc;
+	int32_t idx;
+	size_t qdss_len = 0;
+	size_t qdss_start = 0;
+	dma_addr_t qdss_phy_addr;
+	struct iommu_domain *domain;
+
+	if (!iova || !len || (smmu_hdl == HANDLE_INIT)) {
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].qdss_support) {
+		CAM_ERR(CAM_SMMU,
+			"QDSS memory not supported for this SMMU handle");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	/* QDSS region is a singleton; reject double allocation */
+	if (iommu_cb_set.cb_info[idx].is_qdss_allocated) {
+		CAM_ERR(CAM_SMMU, "Trying to allocate twice");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	qdss_len = iommu_cb_set.cb_info[idx].qdss_info.iova_len;
+	qdss_start = iommu_cb_set.cb_info[idx].qdss_info.iova_start;
+	qdss_phy_addr = iommu_cb_set.cb_info[idx].qdss_phy_addr;
+	CAM_DBG(CAM_SMMU, "QDSS area len from DT = %zu", qdss_len);
+
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	rc = iommu_map(domain,
+		qdss_start,
+		qdss_phy_addr,
+		qdss_len,
+		IOMMU_READ|IOMMU_WRITE);
+
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "Failed to map QDSS into IOMMU");
+		goto unlock_and_end;
+	}
+
+	iommu_cb_set.cb_info[idx].is_qdss_allocated = true;
+
+	*iova = iommu_cb_set.cb_info[idx].qdss_info.iova_start;
+	*len = qdss_len;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return rc;
+
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_alloc_qdss);
+
+/*
+ * cam_smmu_dealloc_qdss() - Remove the QDSS IOMMU mapping created by
+ * cam_smmu_alloc_qdss().
+ * @smmu_hdl: SMMU handle identifying the context bank
+ *
+ * Returns 0 on success or a negative errno. A partial unmap is reported
+ * with -EINVAL but the allocated flag is still cleared.
+ */
+int cam_smmu_dealloc_qdss(int32_t smmu_hdl)
+{
+	int rc = 0;
+	int32_t idx;
+	size_t qdss_len = 0;
+	size_t qdss_start = 0;
+	struct iommu_domain *domain;
+	size_t unmapped = 0;
+
+	if (smmu_hdl == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].qdss_support) {
+		CAM_ERR(CAM_SMMU,
+			"QDSS memory not supported for this SMMU handle");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	/* Nothing to free unless a prior alloc succeeded */
+	if (!iommu_cb_set.cb_info[idx].is_qdss_allocated) {
+		CAM_ERR(CAM_SMMU,
+			"Trying to deallocate qdss that is not allocated");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	qdss_len = iommu_cb_set.cb_info[idx].qdss_info.iova_len;
+	qdss_start = iommu_cb_set.cb_info[idx].qdss_info.iova_start;
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	unmapped = iommu_unmap(domain, qdss_start, qdss_len);
+
+	if (unmapped != qdss_len) {
+		/* Report the short unmap but continue tearing down */
+		CAM_ERR(CAM_SMMU, "Only %zu unmapped out of total %zu",
+			unmapped,
+			qdss_len);
+		rc = -EINVAL;
+	}
+
+	iommu_cb_set.cb_info[idx].is_qdss_allocated = false;
+
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_dealloc_qdss);
+
+/*
+ * cam_smmu_get_region_info() - Report the IOVA start/length of one of the
+ * statically carved regions (firmware, shared, scratch, IO, secheap) of
+ * the context bank identified by @smmu_hdl.
+ * @smmu_hdl:    SMMU handle identifying the context bank
+ * @region_id:   which region to query
+ * @region_info: out: filled with the region's iova_start/iova_len
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, -ENODEV when the
+ * requested region is not supported by this context bank.
+ *
+ * Fixes vs. previous revision:
+ *  - the "Invalid region id" log passed (smmu_hdl, region_id) to a format
+ *    of "region id: %d ... hdl: %X" — arguments were swapped;
+ *  - removed the dead NULL check on &iommu_cb_set.cb_info[idx], which can
+ *    never be NULL (address of an array element).
+ */
+int cam_smmu_get_region_info(int32_t smmu_hdl,
+	enum cam_smmu_region_id region_id,
+	struct cam_smmu_region_info *region_info)
+{
+	int32_t idx;
+	struct cam_context_bank_info *cb = NULL;
+
+	if (!region_info) {
+		CAM_ERR(CAM_SMMU, "Invalid region_info pointer");
+		return -EINVAL;
+	}
+
+	if (smmu_hdl == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Invalid handle");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU, "Handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	cb = &iommu_cb_set.cb_info[idx];
+
+	switch (region_id) {
+	case CAM_SMMU_REGION_FIRMWARE:
+		if (!cb->firmware_support) {
+			CAM_ERR(CAM_SMMU, "Firmware not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->firmware_info.iova_start;
+		region_info->iova_len = cb->firmware_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_SHARED:
+		if (!cb->shared_support) {
+			CAM_ERR(CAM_SMMU, "Shared mem not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->shared_info.iova_start;
+		region_info->iova_len = cb->shared_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_SCRATCH:
+		if (!cb->scratch_buf_support) {
+			CAM_ERR(CAM_SMMU, "Scratch memory not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->scratch_info.iova_start;
+		region_info->iova_len = cb->scratch_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_IO:
+		if (!cb->io_support) {
+			CAM_ERR(CAM_SMMU, "IO memory not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->io_info.iova_start;
+		region_info->iova_len = cb->io_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_SECHEAP:
+		if (!cb->secheap_support) {
+			CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->secheap_info.iova_start;
+		region_info->iova_len = cb->secheap_info.iova_len;
+		break;
+	default:
+		/* Arguments now match the format: region id first, then hdl */
+		CAM_ERR(CAM_SMMU, "Invalid region id: %d for smmu hdl: %X",
+			region_id, smmu_hdl);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return 0;
+}
+EXPORT_SYMBOL(cam_smmu_get_region_info);
+
+/*
+ * cam_smmu_reserve_sec_heap() - Attach and map a dma_buf as the secondary
+ * heap of the context bank, at the fixed secheap IOVA from the device tree.
+ * @smmu_hdl:    SMMU handle identifying the context bank
+ * @buf:         dma_buf backing the secondary heap
+ * @iova:        out: start IOVA of the secheap region
+ * @request_len: out: length of the secheap region
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. previous revision:
+ *  - an iommu_map_sg() length mismatch jumped to cleanup with rc still 0,
+ *    so the function returned success after a failed mapping;
+ *  - PTR_ERR(NULL) is 0, so a NULL @buf also produced a "success" return.
+ */
+int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
+	struct dma_buf *buf,
+	dma_addr_t *iova,
+	size_t *request_len)
+{
+	struct secheap_buf_info *secheap_buf = NULL;
+	size_t size = 0;
+	uint32_t sec_heap_iova = 0;
+	size_t sec_heap_iova_len = 0;
+	int idx;
+	int rc = 0;
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].secheap_support) {
+		CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+
+	/* Secheap is a singleton per context bank */
+	if (iommu_cb_set.cb_info[idx].is_secheap_allocated) {
+		CAM_ERR(CAM_SMMU, "Trying to allocate secheap twice");
+		rc = -ENOMEM;
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return rc;
+	}
+
+	if (IS_ERR_OR_NULL(buf)) {
+		/* PTR_ERR(NULL) == 0; make sure we still return an error */
+		rc = PTR_ERR(buf);
+		if (!rc)
+			rc = -EINVAL;
+		CAM_ERR(CAM_SMMU,
+			"Error: dma get buf failed. rc = %d", rc);
+		goto err_out;
+	}
+
+	secheap_buf = &iommu_cb_set.cb_info[idx].secheap_buf;
+	secheap_buf->buf = buf;
+	secheap_buf->attach = dma_buf_attach(secheap_buf->buf,
+		iommu_cb_set.cb_info[idx].dev);
+	if (IS_ERR_OR_NULL(secheap_buf->attach)) {
+		rc = PTR_ERR(secheap_buf->attach);
+		CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
+		goto err_put;
+	}
+
+	secheap_buf->table = dma_buf_map_attachment(secheap_buf->attach,
+		DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(secheap_buf->table)) {
+		rc = PTR_ERR(secheap_buf->table);
+		CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed");
+		goto err_detach;
+	}
+
+	sec_heap_iova = iommu_cb_set.cb_info[idx].secheap_info.iova_start;
+	sec_heap_iova_len = iommu_cb_set.cb_info[idx].secheap_info.iova_len;
+	size = iommu_map_sg(iommu_cb_set.cb_info[idx].mapping->domain,
+		sec_heap_iova,
+		secheap_buf->table->sgl,
+		secheap_buf->table->nents,
+		IOMMU_READ | IOMMU_WRITE);
+	if (size != sec_heap_iova_len) {
+		CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
+		/* Previously rc stayed 0 here, reporting false success */
+		rc = -ENOMEM;
+		goto err_unmap_sg;
+	}
+
+	iommu_cb_set.cb_info[idx].is_secheap_allocated = true;
+	*iova = (uint32_t)sec_heap_iova;
+	*request_len = sec_heap_iova_len;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return rc;
+
+err_unmap_sg:
+	dma_buf_unmap_attachment(secheap_buf->attach,
+		secheap_buf->table,
+		DMA_BIDIRECTIONAL);
+err_detach:
+	dma_buf_detach(secheap_buf->buf,
+		secheap_buf->attach);
+err_put:
+	dma_buf_put(secheap_buf->buf);
+err_out:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_reserve_sec_heap);
+
+/*
+ * cam_smmu_release_sec_heap() - Unmap the secondary-heap region and undo
+ * the dma_buf attach/map performed by cam_smmu_reserve_sec_heap().
+ * @smmu_hdl: SMMU handle identifying the context bank
+ *
+ * Returns 0 on success or a negative errno. A short iommu_unmap is only
+ * logged; the dma_buf teardown still proceeds.
+ */
+int cam_smmu_release_sec_heap(int32_t smmu_hdl)
+{
+	int idx;
+	size_t size = 0;
+	uint32_t sec_heap_iova = 0;
+	size_t sec_heap_iova_len = 0;
+	struct secheap_buf_info *secheap_buf = NULL;
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].secheap_support) {
+		CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+		return -EINVAL;
+	}
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+
+	/* Releasing without a prior reserve is an error */
+	if (!iommu_cb_set.cb_info[idx].is_secheap_allocated) {
+		CAM_ERR(CAM_SMMU, "Trying to release secheap twice");
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -ENOMEM;
+	}
+
+	secheap_buf = &iommu_cb_set.cb_info[idx].secheap_buf;
+	sec_heap_iova = iommu_cb_set.cb_info[idx].secheap_info.iova_start;
+	sec_heap_iova_len = iommu_cb_set.cb_info[idx].secheap_info.iova_len;
+
+	size = iommu_unmap(iommu_cb_set.cb_info[idx].mapping->domain,
+		sec_heap_iova,
+		sec_heap_iova_len);
+	if (size != sec_heap_iova_len) {
+		/* Log-only: continue the dma_buf teardown regardless */
+		CAM_ERR(CAM_SMMU, "Failed: Unmapped = %zu, requested = %zu",
+			size,
+			sec_heap_iova_len);
+	}
+
+	/* Reverse order of reserve: unmap attachment, detach, then put */
+	dma_buf_unmap_attachment(secheap_buf->attach,
+		secheap_buf->table, DMA_BIDIRECTIONAL);
+	dma_buf_detach(secheap_buf->buf, secheap_buf->attach);
+	dma_buf_put(secheap_buf->buf);
+	iommu_cb_set.cb_info[idx].is_secheap_allocated = false;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_smmu_release_sec_heap);
+
+/*
+ * cam_smmu_map_buffer_validate() - Attach, map and record a dma_buf for a
+ * context bank, either into the SHARED region (driver-managed IOVA pool +
+ * explicit iommu_map_sg) or the IO region (msm_dma_map_sg_lazy).
+ * @buf:          dma_buf to map (may be an ERR_PTR from the caller)
+ * @idx:          context bank index
+ * @dma_dir:      DMA direction for the attachment
+ * @paddr_ptr:    out: device address; for SHARED also reads *len_ptr as the
+ *                requested length on input
+ * @len_ptr:      in/out: requested length (SHARED) / mapped length (out)
+ * @region_id:    CAM_SMMU_REGION_SHARED or CAM_SMMU_REGION_IO
+ * @mapping_info: out: newly allocated tracking record (caller owns)
+ *
+ * Returns 0 on success or a negative errno; on failure all intermediate
+ * resources (attachment, table, iova, dma_buf ref) are released.
+ *
+ * Fixes vs. previous revision:
+ *  - iommu_map_sg() returns size_t, so "size < 0" could never detect a
+ *    failure; a failed mapping returns 0 and is now checked with !size;
+ *  - on that failure, the IOVA is freed with the allocated length
+ *    (*len_ptr) rather than the bogus returned size;
+ *  - the NULL-domain path now sets rc instead of returning 0.
+ */
+static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
+	int idx, enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id,
+	struct cam_dma_buff_info **mapping_info)
+{
+	struct dma_buf_attachment *attach = NULL;
+	struct sg_table *table = NULL;
+	struct iommu_domain *domain;
+	size_t size = 0;
+	uint32_t iova = 0;
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(buf)) {
+		rc = PTR_ERR(buf);
+		CAM_ERR(CAM_SMMU,
+			"Error: dma get buf failed. rc = %d", rc);
+		goto err_out;
+	}
+
+	if (!mapping_info) {
+		rc = -EINVAL;
+		CAM_ERR(CAM_SMMU, "Error: mapping_info is invalid");
+		goto err_out;
+	}
+
+	attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
+	if (IS_ERR_OR_NULL(attach)) {
+		rc = PTR_ERR(attach);
+		CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
+		goto err_put;
+	}
+
+	table = dma_buf_map_attachment(attach, dma_dir);
+	if (IS_ERR_OR_NULL(table)) {
+		rc = PTR_ERR(table);
+		CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed");
+		goto err_detach;
+	}
+
+	if (region_id == CAM_SMMU_REGION_SHARED) {
+		domain = iommu_cb_set.cb_info[idx].mapping->domain;
+		if (!domain) {
+			CAM_ERR(CAM_SMMU, "CB has no domain set");
+			/* Previously fell through with rc == 0 */
+			rc = -EINVAL;
+			goto err_unmap_sg;
+		}
+
+		/* Carve an IOVA range of the requested length from the pool */
+		rc = cam_smmu_alloc_iova(*len_ptr,
+			iommu_cb_set.cb_info[idx].handle,
+			&iova);
+
+		if (rc < 0) {
+			CAM_ERR(CAM_SMMU,
+				"IOVA alloc failed for shared memory");
+			goto err_unmap_sg;
+		}
+
+		size = iommu_map_sg(domain, iova, table->sgl, table->nents,
+			IOMMU_READ | IOMMU_WRITE);
+
+		/* iommu_map_sg() is size_t: failure is 0, never negative */
+		if (!size) {
+			CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
+			rc = cam_smmu_free_iova(iova,
+				*len_ptr, iommu_cb_set.cb_info[idx].handle);
+			if (rc)
+				CAM_ERR(CAM_SMMU, "IOVA free failed");
+			rc = -ENOMEM;
+			goto err_unmap_sg;
+		} else {
+			CAM_DBG(CAM_SMMU, "iommu_map_sg returned %zu", size);
+			*paddr_ptr = iova;
+			*len_ptr = size;
+		}
+	} else if (region_id == CAM_SMMU_REGION_IO) {
+		rc = msm_dma_map_sg_lazy(iommu_cb_set.cb_info[idx].dev,
+			table->sgl, table->nents, dma_dir, buf);
+
+		if (rc != table->nents) {
+			CAM_ERR(CAM_SMMU, "Error: msm_dma_map_sg_lazy failed");
+			rc = -ENOMEM;
+			goto err_unmap_sg;
+		} else {
+			*paddr_ptr = sg_dma_address(table->sgl);
+			*len_ptr = (size_t)sg_dma_len(table->sgl);
+		}
+	} else {
+		CAM_ERR(CAM_SMMU, "Error: Wrong region id passed");
+		rc = -EINVAL;
+		goto err_unmap_sg;
+	}
+
+	if (table->sgl) {
+		CAM_DBG(CAM_SMMU,
+			"DMA buf: %pK, device: %pK, attach: %pK, table: %pK",
+			(void *)buf,
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)attach, (void *)table);
+		CAM_DBG(CAM_SMMU, "table sgl: %pK, rc: %d, dma_address: 0x%x",
+			(void *)table->sgl, rc,
+			(unsigned int)table->sgl->dma_address);
+	} else {
+		rc = -EINVAL;
+		CAM_ERR(CAM_SMMU, "Error: table sgl is null");
+		goto err_unmap_sg;
+	}
+
+	/* fill up mapping_info */
+	*mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+	if (!(*mapping_info)) {
+		rc = -ENOSPC;
+		goto err_alloc;
+	}
+
+	(*mapping_info)->buf = buf;
+	(*mapping_info)->attach = attach;
+	(*mapping_info)->table = table;
+	(*mapping_info)->paddr = *paddr_ptr;
+	(*mapping_info)->len = *len_ptr;
+	(*mapping_info)->dir = dma_dir;
+	(*mapping_info)->ref_count = 1;
+	(*mapping_info)->region_id = region_id;
+
+	if (!*paddr_ptr || !*len_ptr) {
+		CAM_ERR(CAM_SMMU, "Error: Space Allocation failed");
+		kfree(*mapping_info);
+		rc = -ENOSPC;
+		goto err_alloc;
+	}
+	CAM_DBG(CAM_SMMU, "dma_buf = %pK, dev = %pK, paddr= %pK, len = %u",
+		buf, (void *)iommu_cb_set.cb_info[idx].dev,
+		(void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+	return 0;
+
+err_alloc:
+	/* Mapping succeeded but bookkeeping failed: undo region mapping */
+	if (region_id == CAM_SMMU_REGION_SHARED) {
+		cam_smmu_free_iova(iova,
+			size,
+			iommu_cb_set.cb_info[idx].handle);
+
+		iommu_unmap(iommu_cb_set.cb_info[idx].mapping->domain,
+			*paddr_ptr,
+			*len_ptr);
+	} else if (region_id == CAM_SMMU_REGION_IO) {
+		msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
+			table->sgl,
+			table->nents,
+			dma_dir,
+			buf);
+	}
+err_unmap_sg:
+	dma_buf_unmap_attachment(attach, table, dma_dir);
+err_detach:
+	dma_buf_detach(buf, attach);
+err_put:
+	dma_buf_put(buf);
+err_out:
+	return rc;
+}
+
+
+/*
+ * cam_smmu_map_buffer_and_add_to_list() - Resolve an ion fd to its
+ * dma_buf, map it via cam_smmu_map_buffer_validate(), and record the
+ * mapping on the context bank's user-buffer list.
+ *
+ * Returns 0 on success or the negative errno from validation.
+ */
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+	enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	struct cam_dma_buff_info *info = NULL;
+	struct dma_buf *dmabuf;
+	int ret;
+
+	/* Translate the fd into its backing dma_buf structure */
+	dmabuf = dma_buf_get(ion_fd);
+
+	/* Validation handles ERR_PTR/NULL bufs and performs the mapping */
+	ret = cam_smmu_map_buffer_validate(dmabuf, idx, dma_dir, paddr_ptr,
+		len_ptr, region_id, &info);
+	if (ret) {
+		CAM_ERR(CAM_SMMU, "buffer validation failure");
+		return ret;
+	}
+
+	info->ion_fd = ion_fd;
+	/* Track the mapping on this context bank's buffer list */
+	list_add(&info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	return 0;
+}
+
+/*
+ * cam_smmu_map_kernel_buffer_and_add_to_list() - Map a kernel-owned
+ * dma_buf (no ion fd) and record it on the context bank's kernel list.
+ *
+ * Returns 0 on success or the negative errno from validation.
+ */
+static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
+	struct dma_buf *buf, enum dma_data_direction dma_dir,
+	dma_addr_t *paddr_ptr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id)
+{
+	struct cam_dma_buff_info *info = NULL;
+	int ret;
+
+	ret = cam_smmu_map_buffer_validate(buf, idx, dma_dir, paddr_ptr,
+		len_ptr, region_id, &info);
+	if (ret) {
+		CAM_ERR(CAM_SMMU, "buffer validation failure");
+		return ret;
+	}
+
+	/* Kernel mappings carry no fd; -1 marks them as such */
+	info->ion_fd = -1;
+
+	/* Track the mapping on the kernel-buffer list of this bank */
+	list_add(&info->list,
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list);
+
+	return 0;
+}
+
+
+/*
+ * cam_smmu_unmap_buf_and_remove_from_list() - Tear down one tracked
+ * mapping: undo the region-specific IOMMU/DMA mapping, release the
+ * dma_buf attachment, unlink the record and free it.
+ * @mapping_info: record created by cam_smmu_map_buffer_validate()
+ * @idx:          context bank index
+ *
+ * Returns 0 on success, -EINVAL if the record is incomplete. The caller
+ * must hold the context bank lock (all callers in this file do).
+ */
+static int cam_smmu_unmap_buf_and_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx)
+{
+	int rc;
+	size_t size;
+	struct iommu_domain *domain;
+
+	if ((!mapping_info->buf) || (!mapping_info->table) ||
+		(!mapping_info->attach)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params dev = %pK, table = %pK",
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)mapping_info->table);
+		CAM_ERR(CAM_SMMU, "Error:dma_buf = %pK, attach = %pK",
+			(void *)mapping_info->buf,
+			(void *)mapping_info->attach);
+		return -EINVAL;
+	}
+
+	if (mapping_info->region_id == CAM_SMMU_REGION_SHARED) {
+		CAM_DBG(CAM_SMMU,
+			"Removing SHARED buffer paddr = %pK, len = %zu",
+			(void *)mapping_info->paddr, mapping_info->len);
+
+		domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+		/* SHARED: explicit iommu_unmap + return IOVA to the pool */
+		size = iommu_unmap(domain,
+			mapping_info->paddr,
+			mapping_info->len);
+
+		if (size != mapping_info->len) {
+			/* Log-only: teardown continues regardless */
+			CAM_ERR(CAM_SMMU, "IOMMU unmap failed");
+			CAM_ERR(CAM_SMMU, "Unmapped = %zu, requested = %zu",
+				size,
+				mapping_info->len);
+		}
+
+		rc = cam_smmu_free_iova(mapping_info->paddr,
+			mapping_info->len,
+			iommu_cb_set.cb_info[idx].handle);
+
+		if (rc)
+			CAM_ERR(CAM_SMMU, "IOVA free failed");
+
+	} else if (mapping_info->region_id == CAM_SMMU_REGION_IO) {
+		/* IO: mapping was created lazily via msm_dma_map_sg_lazy */
+		msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
+			mapping_info->table->sgl, mapping_info->table->nents,
+			mapping_info->dir, mapping_info->buf);
+	}
+
+	/* Reverse order of map: unmap attachment, detach, drop buf ref */
+	dma_buf_unmap_attachment(mapping_info->attach,
+		mapping_info->table, mapping_info->dir);
+	dma_buf_detach(mapping_info->buf, mapping_info->attach);
+	dma_buf_put(mapping_info->buf);
+
+	mapping_info->buf = NULL;
+
+	list_del_init(&mapping_info->list);
+
+	/* free one buffer */
+	kfree(mapping_info);
+	return 0;
+}
+
+/*
+ * cam_smmu_check_fd_in_list() - Look up an ion fd among the context
+ * bank's tracked user mappings; on a hit, report its paddr and length.
+ * Caller is expected to hold the context bank lock.
+ */
+static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
+	int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	struct cam_dma_buff_info *entry;
+	struct list_head *head = &iommu_cb_set.cb_info[idx].smmu_buf_list;
+
+	list_for_each_entry(entry, head, list) {
+		if (entry->ion_fd != ion_fd)
+			continue;
+		*paddr_ptr = entry->paddr;
+		*len_ptr = entry->len;
+		return CAM_SMMU_BUFF_EXIST;
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+/*
+ * cam_smmu_check_dma_buf_in_list() - Look up a dma_buf pointer among the
+ * context bank's kernel mappings; on a hit, report its paddr and length.
+ * Caller is expected to hold the context bank lock.
+ */
+static enum cam_smmu_buf_state cam_smmu_check_dma_buf_in_list(int idx,
+	struct dma_buf *buf, dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	struct cam_dma_buff_info *entry;
+	struct list_head *head =
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list;
+
+	list_for_each_entry(entry, head, list) {
+		if (entry->buf != buf)
+			continue;
+		*paddr_ptr = entry->paddr;
+		*len_ptr = entry->len;
+		return CAM_SMMU_BUFF_EXIST;
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+/*
+ * cam_smmu_check_secure_fd_in_list() - Look up an ion fd among the secure
+ * mappings tracked on the bank's buffer list; on a hit, report paddr/len
+ * AND take an additional reference (ref_count++), unlike the _validate
+ * variant below which only reads.
+ *
+ * NOTE(review): entries are walked as struct cam_sec_buff_info on the
+ * same smmu_buf_list used for cam_dma_buff_info elsewhere — presumably a
+ * secure context bank only ever holds sec entries; verify against callers.
+ */
+static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
+	int ion_fd, dma_addr_t *paddr_ptr,
+	size_t *len_ptr)
+{
+	struct cam_sec_buff_info *mapping;
+
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_list,
+		list) {
+		if (mapping->ion_fd == ion_fd) {
+			*paddr_ptr = mapping->paddr;
+			*len_ptr = mapping->len;
+			/* side effect: bump the mapping's refcount */
+			mapping->ref_count++;
+			return CAM_SMMU_BUFF_EXIST;
+		}
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+/*
+ * cam_smmu_validate_secure_fd_in_list() - Read-only lookup of a secure
+ * ion fd: reports paddr/len on a hit but, unlike
+ * cam_smmu_check_secure_fd_in_list(), does NOT bump the refcount.
+ */
+static enum cam_smmu_buf_state cam_smmu_validate_secure_fd_in_list(int idx,
+	int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	struct cam_sec_buff_info *entry;
+	struct list_head *head = &iommu_cb_set.cb_info[idx].smmu_buf_list;
+
+	list_for_each_entry(entry, head, list) {
+		if (entry->ion_fd != ion_fd)
+			continue;
+		*paddr_ptr = entry->paddr;
+		*len_ptr = entry->len;
+		return CAM_SMMU_BUFF_EXIST;
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+/*
+ * cam_smmu_get_handle() - Allocate an SMMU handle for the context bank
+ * named @identifier and store it through @handle_ptr.
+ *
+ * Returns 0 on success or the negative errno from the handle table.
+ */
+int cam_smmu_get_handle(char *identifier, int *handle_ptr)
+{
+	int rc;
+
+	/* Both the name and the output slot must be provided */
+	if (!identifier) {
+		CAM_ERR(CAM_SMMU, "Error: iommu hardware name is NULL");
+		return -EINVAL;
+	}
+	if (!handle_ptr) {
+		CAM_ERR(CAM_SMMU, "Error: handle pointer is NULL");
+		return -EINVAL;
+	}
+
+	/* create and put handle in the table */
+	rc = cam_smmu_create_add_handle_in_table(identifier, handle_ptr);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "Error: %s get handle fail", identifier);
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_get_handle);
+
+/*
+ * cam_smmu_ops() - Dispatch an attach/detach operation on the context
+ * bank identified by @handle. VOTE/DEVOTE are not implemented and fall
+ * through to the error path.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param ops)
+{
+	int idx;
+	int rc = 0;
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU, "Error: Index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	/* Guard against stale handles after the slot was recycled */
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	switch (ops) {
+	case CAM_SMMU_ATTACH:
+		rc = cam_smmu_attach(idx);
+		break;
+	case CAM_SMMU_DETACH:
+		rc = cam_smmu_detach_device(idx);
+		break;
+	case CAM_SMMU_VOTE:
+	case CAM_SMMU_DEVOTE:
+	default:
+		CAM_ERR(CAM_SMMU, "Error: idx = %d, ops = %d", idx, ops);
+		rc = -EINVAL;
+	}
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_ops);
+
+/*
+ * cam_smmu_alloc_scratch_buffer_add_to_list() - Build a scratch mapping:
+ * a single physical chunk of @phys_len bytes is mapped repeatedly
+ * (virt_len / phys_len entries) at a scratch-pool IOVA, and the mapping
+ * is recorded on the bank's buffer list.
+ * @idx:       context bank index
+ * @virt_len:  total virtual length (multiple of @phys_len, page aligned —
+ *             validated by the caller)
+ * @phys_len:  length of the backing physical chunk
+ * @iommu_dir: IOMMU protection flags
+ * @virt_addr: out: start IOVA of the scratch mapping
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. previous revision:
+ *  - an iommu_map_sg() length mismatch jumped to cleanup with rc still
+ *    holding the successful alloc-va return, so failure returned success;
+ *  - the scratch VA was leaked on both the map-failure and the
+ *    kzalloc-failure paths (new err_free_va label frees it).
+ */
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+	size_t virt_len,
+	size_t phys_len,
+	unsigned int iommu_dir,
+	dma_addr_t *virt_addr)
+{
+	unsigned long nents = virt_len / phys_len;
+	struct cam_dma_buff_info *mapping_info = NULL;
+	size_t unmapped;
+	dma_addr_t iova = 0;
+	struct scatterlist *sg;
+	int i = 0;
+	int rc;
+	struct iommu_domain *domain = NULL;
+	struct page *page;
+	struct sg_table *table = NULL;
+
+	CAM_DBG(CAM_SMMU, "nents = %lu, idx = %d, virt_len  = %zx",
+		nents, idx, virt_len);
+	CAM_DBG(CAM_SMMU, "phys_len = %zx, iommu_dir = %d, virt_addr = %pK",
+		phys_len, iommu_dir, virt_addr);
+
+	/*
+	 * This table will go inside the 'mapping' structure
+	 * where it will be held until put_scratch_buffer is called
+	 */
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table) {
+		rc = -ENOMEM;
+		goto err_table_alloc;
+	}
+
+	rc = sg_alloc_table(table, nents, GFP_KERNEL);
+	if (rc < 0) {
+		rc = -EINVAL;
+		goto err_sg_alloc;
+	}
+
+	page = alloc_pages(GFP_KERNEL, get_order(phys_len));
+	if (!page) {
+		rc = -ENOMEM;
+		goto err_page_alloc;
+	}
+
+	/* Every sg entry aliases the same physical chunk */
+	for_each_sg(table->sgl, sg, table->nents, i)
+		sg_set_page(sg, page, phys_len, 0);
+
+	/* Get the domain from within our cb_set struct and map it*/
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+	rc = cam_smmu_alloc_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map,
+		virt_len, &iova);
+
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU,
+			"Could not find valid iova for scratch buffer");
+		goto err_iommu_map;
+	}
+
+	if (iommu_map_sg(domain,
+		iova,
+		table->sgl,
+		table->nents,
+		iommu_dir) != virt_len) {
+		CAM_ERR(CAM_SMMU, "iommu_map_sg() failed");
+		/* Previously rc kept the alloc-va success value here */
+		rc = -ENOMEM;
+		goto err_free_va;
+	}
+
+	/* Now update our mapping information within the cb_set struct */
+	mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+	if (!mapping_info) {
+		rc = -ENOMEM;
+		goto err_mapping_info;
+	}
+
+	mapping_info->ion_fd = 0xDEADBEEF;
+	mapping_info->buf = NULL;
+	mapping_info->attach = NULL;
+	mapping_info->table = table;
+	mapping_info->paddr = iova;
+	mapping_info->len = virt_len;
+	mapping_info->iommu_dir = iommu_dir;
+	mapping_info->ref_count = 1;
+	mapping_info->phys_len = phys_len;
+	mapping_info->region_id = CAM_SMMU_REGION_SCRATCH;
+
+	CAM_DBG(CAM_SMMU, "paddr = %pK, len = %zx, phys_len = %zx",
+		(void *)mapping_info->paddr,
+		mapping_info->len, mapping_info->phys_len);
+
+	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	*virt_addr = (dma_addr_t)iova;
+
+	CAM_DBG(CAM_SMMU, "mapped virtual address = %lx",
+		(unsigned long)*virt_addr);
+	return 0;
+
+err_mapping_info:
+	unmapped = iommu_unmap(domain, iova, virt_len);
+	if (unmapped != virt_len)
+		CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx",
+			unmapped, virt_len);
+err_free_va:
+	/* Return the scratch VA to the pool (previously leaked) */
+	cam_smmu_free_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map,
+		iova, virt_len);
+err_iommu_map:
+	__free_pages(page, get_order(phys_len));
+err_page_alloc:
+	sg_free_table(table);
+err_sg_alloc:
+	kfree(table);
+err_table_alloc:
+	return rc;
+}
+
+/*
+ * cam_smmu_free_scratch_buffer_remove_from_list() - Undo a scratch
+ * mapping: iommu_unmap the region, return the IOVA to the scratch map,
+ * free the backing pages and sg table, unlink and free the record.
+ * @mapping_info: scratch record built by the alloc routine
+ * @idx:          context bank index
+ *
+ * Returns 0 on success or -EINVAL if the scratch VA free failed; the
+ * page/table teardown proceeds in either case. Caller holds the bank lock.
+ */
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx)
+{
+	int rc = 0;
+	size_t unmapped;
+	struct iommu_domain *domain =
+		iommu_cb_set.cb_info[idx].mapping->domain;
+	struct scratch_mapping *scratch_map =
+		&iommu_cb_set.cb_info[idx].scratch_map;
+
+	if (!mapping_info->table) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params: dev = %pK, table = %pK",
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)mapping_info->table);
+		return -EINVAL;
+	}
+
+	/* Clean up the mapping_info struct from the list */
+	unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len);
+	if (unmapped != mapping_info->len)
+		CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx",
+			unmapped, mapping_info->len);
+
+	rc = cam_smmu_free_scratch_va(scratch_map,
+		mapping_info->paddr,
+		mapping_info->len);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid iova while freeing scratch buffer");
+		rc = -EINVAL;
+	}
+
+	/* All sg entries alias one chunk; free it once via the first sg */
+	__free_pages(sg_page(mapping_info->table->sgl),
+		get_order(mapping_info->phys_len));
+	sg_free_table(mapping_info->table);
+	kfree(mapping_info->table);
+	list_del_init(&mapping_info->list);
+
+	kfree(mapping_info);
+	mapping_info = NULL;
+
+	return rc;
+}
+
+/*
+ * cam_smmu_get_scratch_iova() - Allocate and map a scratch buffer for the
+ * context bank identified by @handle.
+ * @handle:    SMMU handle
+ * @dir:       requested mapping direction (translated to IOMMU flags)
+ * @paddr_ptr: out: start IOVA of the scratch mapping
+ * @virt_len:  total virtual length (page aligned, multiple of @phys_len)
+ * @phys_len:  backing physical chunk length
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. previous revision:
+ *  - the virt/phys debug print passed its arguments swapped
+ *    (phys_len where virt_len was formatted and vice versa);
+ *  - the length-check error message said "virt_len > phys_len" although
+ *    the failing condition is virt_len < phys_len.
+ */
+int cam_smmu_get_scratch_iova(int handle,
+	enum cam_smmu_map_dir dir,
+	dma_addr_t *paddr_ptr,
+	size_t virt_len,
+	size_t phys_len)
+{
+	int idx, rc;
+	unsigned int iommu_dir;
+
+	if (!paddr_ptr || !virt_len || !phys_len) {
+		CAM_ERR(CAM_SMMU, "Error: Input pointer or lengths invalid");
+		return -EINVAL;
+	}
+
+	if (virt_len < phys_len) {
+		/* Message now matches the condition being rejected */
+		CAM_ERR(CAM_SMMU, "Error: virt_len < phys_len");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir);
+	if (iommu_dir == IOMMU_INVALID_DIR) {
+		CAM_ERR(CAM_SMMU,
+			"Error: translate direction failed. dir = %d", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Context bank does not support scratch bufs");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	CAM_DBG(CAM_SMMU, "smmu handle = %x, idx = %d, dir = %d",
+		handle, idx, dir);
+	/* Arguments now match the format: virt_len first, then phys_len */
+	CAM_DBG(CAM_SMMU, "virt_len = %zx, phys_len  = %zx",
+		virt_len, phys_len);
+
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
+			iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!IS_ALIGNED(virt_len, PAGE_SIZE)) {
+		CAM_ERR(CAM_SMMU,
+			"Requested scratch buffer length not page aligned");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!IS_ALIGNED(virt_len, phys_len)) {
+		CAM_ERR(CAM_SMMU,
+			"Requested virt length not aligned with phys length");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = cam_smmu_alloc_scratch_buffer_add_to_list(idx,
+		virt_len,
+		phys_len,
+		iommu_dir,
+		paddr_ptr);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "Error: mapping or add list fail");
+
+error:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+
+/*
+ * cam_smmu_put_scratch_iova() - Release a scratch mapping previously
+ * created by cam_smmu_get_scratch_iova(), identified by its start IOVA.
+ * @handle: SMMU handle
+ * @paddr:  IOVA returned by the get call
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int cam_smmu_put_scratch_iova(int handle,
+	dma_addr_t paddr)
+{
+	int idx;
+	int rc = -1;
+	struct cam_dma_buff_info *mapping_info;
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	/* Guard against stale handles after the slot was recycled */
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto handle_err;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Context bank does not support scratch buffers");
+		rc = -EINVAL;
+		goto handle_err;
+	}
+
+	/* Based on virtual address and index, we can find mapping info
+	 * of the scratch buffer
+	 */
+	mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr);
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid params");
+		rc = -ENODEV;
+		goto handle_err;
+	}
+
+	/* unmapping one buffer from device */
+	rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+		goto handle_err;
+	}
+
+handle_err:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+
+/*
+ * cam_smmu_map_stage2_buffer_and_add_to_list() - Import a secure ion fd,
+ * resolve its physical address via ion_phys(), and record the mapping on
+ * the context bank's buffer list.
+ * @idx:       context bank index
+ * @ion_fd:    ion fd of the secure buffer
+ * @dma_dir:   DMA direction recorded in the mapping info
+ * @client:    ion client used for import/phys lookup
+ * @paddr_ptr: out: physical/device address of the buffer
+ * @len_ptr:   out: length of the buffer
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. previous revision: the imported ion handle was leaked when
+ * ion_phys() failed or when the tracking kzalloc failed; both paths now
+ * release it with ion_free().
+ */
+static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
+	enum dma_data_direction dma_dir, struct ion_client *client,
+	dma_addr_t *paddr_ptr,
+	size_t *len_ptr)
+{
+	int rc = 0;
+	struct ion_handle *i_handle = NULL;
+	struct cam_sec_buff_info *mapping_info;
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	i_handle = ion_import_dma_buf_fd(client, ion_fd);
+	if (IS_ERR_OR_NULL((void *)(i_handle))) {
+		CAM_ERR(CAM_SMMU, "ion import dma buffer failed");
+		return -EINVAL;
+	}
+
+	/* return addr and len to client */
+	rc = ion_phys(client, i_handle, paddr_ptr, len_ptr);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "ION Get Physical failed, rc: %d",
+			rc);
+		/* drop the imported handle (previously leaked) */
+		ion_free(client, i_handle);
+		return rc;
+	}
+
+	/* fill up mapping_info */
+	mapping_info = kzalloc(sizeof(struct cam_sec_buff_info), GFP_KERNEL);
+	if (!mapping_info) {
+		/* drop the imported handle (previously leaked) */
+		ion_free(client, i_handle);
+		return -ENOMEM;
+	}
+
+	mapping_info->ion_fd = ion_fd;
+	mapping_info->paddr = *paddr_ptr;
+	mapping_info->len = *len_ptr;
+	mapping_info->dir = dma_dir;
+	mapping_info->ref_count = 1;
+	mapping_info->i_hdl = i_handle;
+	mapping_info->i_client = client;
+
+	CAM_DBG(CAM_SMMU, "ion_fd = %d, dev = %pK, paddr= %pK, len = %u",
+		ion_fd,
+		(void *)iommu_cb_set.cb_info[idx].dev,
+		(void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+	/* add to the list */
+	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	return rc;
+}
+
+/*
+ * cam_smmu_map_stage2_iova()
+ *
+ * Map a secure (stage-2) ion buffer into the secure context bank
+ * identified by @handle and return its address/length through
+ * @paddr_ptr / @len_ptr.
+ *
+ * If the fd is already mapped on this context bank, the cached address
+ * is returned and 0 is reported (no new reference is taken here).
+ * Returns a negative errno on any validation or mapping failure.
+ */
+int cam_smmu_map_stage2_iova(int handle,
+	int ion_fd, enum cam_smmu_map_dir dir,
+	struct ion_client *client, ion_phys_addr_t *paddr_ptr,
+	size_t *len_ptr)
+{
+	int idx, rc;
+	enum dma_data_direction dma_dir;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid inputs, paddr_ptr:%pK, len_ptr: %pK",
+			paddr_ptr, len_ptr);
+		return -EINVAL;
+	}
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	dma_dir = cam_smmu_translate_dir(dir);
+	if (dma_dir == DMA_NONE) {
+		CAM_ERR(CAM_SMMU,
+			"Error: translate direction failed. dir = %d", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if ((handle == HANDLE_INIT) ||
+		(idx < 0) ||
+		(idx >= iommu_cb_set.cb_num)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	/* stage-2 buffers may only be mapped into secure context banks */
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map secure mem to non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	/* re-check under the lock that the handle still owns this slot */
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_secure_fd_in_list(idx, ion_fd, paddr_ptr,
+			len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CAM_DBG(CAM_SMMU, "fd:%d already in list, give same addr back",
+			ion_fd);
+		rc = 0;
+		goto get_addr_end;
+	}
+	rc = cam_smmu_map_stage2_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+			client, paddr_ptr, len_ptr);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: mapping or add list fail");
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_stage2_iova);
+
+/*
+ * cam_smmu_secure_unmap_buf_and_remove_from_list()
+ *
+ * Release the ion handle backing a secure mapping, unlink the entry
+ * from the context bank's buffer list and free the bookkeeping struct.
+ * The only caller (cam_smmu_unmap_stage2_iova) holds the context
+ * bank's lock around this call.
+ */
+static int cam_smmu_secure_unmap_buf_and_remove_from_list(
+	struct cam_sec_buff_info *mapping_info,
+	int idx)
+{
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU, "Error: List doesn't exist");
+		return -EINVAL;
+	}
+	ion_free(mapping_info->i_client, mapping_info->i_hdl);
+	list_del_init(&mapping_info->list);
+
+	CAM_DBG(CAM_SMMU, "unmap fd: %d, idx : %d", mapping_info->ion_fd, idx);
+
+	/* free one buffer */
+	kfree(mapping_info);
+	return 0;
+}
+
+/*
+ * cam_smmu_unmap_stage2_iova()
+ *
+ * Drop one reference on a secure mapping created by
+ * cam_smmu_map_stage2_iova(); the buffer is only actually unmapped and
+ * freed when ref_count reaches zero.
+ *
+ * Returns 0 on success (including when only the refcount was dropped)
+ * or a negative errno on validation/unmap failure.
+ */
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd)
+{
+	int idx, rc;
+	struct cam_sec_buff_info *mapping_info;
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if ((handle == HANDLE_INIT) ||
+		(idx < 0) ||
+		(idx >= iommu_cb_set.cb_num)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	/* secure buffers can only live on secure context banks */
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap secure mem from non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* based on ion fd and index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_sec_buf_idx(idx, ion_fd);
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params! idx = %d, fd = %d",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* other users still hold the mapping: just drop one reference */
+	mapping_info->ref_count--;
+	if (mapping_info->ref_count > 0) {
+		CAM_DBG(CAM_SMMU,
+			"idx: %d fd = %d ref_count: %d",
+			idx, ion_fd, mapping_info->ref_count);
+		rc = 0;
+		goto put_addr_end;
+	}
+	mapping_info->ref_count = 0;
+
+	/* unmapping one buffer from device */
+	rc = cam_smmu_secure_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+		goto put_addr_end;
+	}
+
+put_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_stage2_iova);
+
+/*
+ * cam_smmu_map_iova_validate_params()
+ *
+ * Common sanity checks shared by the user/kernel map paths: output
+ * pointers, handle, map direction and table index.  Also pre-clears
+ * the caller's output address (and length, except for SHARED-region
+ * maps, where the length is an input).
+ */
+static int cam_smmu_map_iova_validate_params(int handle,
+	enum cam_smmu_map_dir dir,
+	dma_addr_t *paddr_ptr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id)
+{
+	int table_idx;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU, "Input pointers are invalid");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Invalid handle");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	if (region_id != CAM_SMMU_REGION_SHARED)
+		*len_ptr = (size_t)0;
+
+	if (cam_smmu_translate_dir(dir) == DMA_NONE) {
+		CAM_ERR(CAM_SMMU, "translate direction failed. dir = %d", dir);
+		return -EINVAL;
+	}
+
+	table_idx = GET_SMMU_TABLE_IDX(handle);
+	if (table_idx < 0 || table_idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU, "handle or index invalid. idx = %d hdl = %x",
+			table_idx, handle);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_smmu_map_user_iova()
+ *
+ * Map a userspace ion fd into the non-secure, attached context bank
+ * identified by @handle, returning the IOVA/length through
+ * @paddr_ptr / @len_ptr.  Mapping an fd that is already on the list
+ * fails with -EALREADY (unlike the stage-2 path, which returns the
+ * cached address).
+ */
+int cam_smmu_map_user_iova(int handle, int ion_fd,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+	enum dma_data_direction dma_dir;
+
+	rc = cam_smmu_map_iova_validate_params(handle, dir, paddr_ptr,
+		len_ptr, region_id);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "initial checks failed, unable to proceed");
+		return rc;
+	}
+
+	/* dir and idx already validated above */
+	dma_dir = cam_smmu_translate_dir(dir);
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map non-secure mem to secure cb");
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	/* buffers can only be mapped while the cb is attached */
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
+			iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CAM_ERR(CAM_SMMU,
+			"ion_fd: %d already in the list", ion_fd);
+		rc = -EALREADY;
+		goto get_addr_end;
+	}
+
+	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+		paddr_ptr, len_ptr, region_id);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "mapping or add list fail");
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_user_iova);
+
+/*
+ * cam_smmu_map_kernel_iova()
+ *
+ * Kernel-client counterpart of cam_smmu_map_user_iova(): maps a
+ * struct dma_buf (instead of an ion fd) into the non-secure, attached
+ * context bank identified by @handle.  A dma_buf already on the list
+ * fails with -EALREADY.
+ */
+int cam_smmu_map_kernel_iova(int handle, struct dma_buf *buf,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+	enum dma_data_direction dma_dir;
+
+	rc = cam_smmu_map_iova_validate_params(handle, dir, paddr_ptr,
+		len_ptr, region_id);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "initial checks failed, unable to proceed");
+		return rc;
+	}
+
+	/* dir and idx already validated above */
+	dma_dir = cam_smmu_translate_dir(dir);
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map non-secure mem to secure cb");
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	/* buffers can only be mapped while the cb is attached */
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
+			iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_dma_buf_in_list(idx, buf,
+		paddr_ptr, len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CAM_ERR(CAM_SMMU,
+			"dma_buf :%pK already in the list", buf);
+		rc = -EALREADY;
+		goto get_addr_end;
+	}
+
+	rc = cam_smmu_map_kernel_buffer_and_add_to_list(idx, buf, dma_dir,
+		paddr_ptr, len_ptr, region_id);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "mapping or add list fail");
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_kernel_iova);
+
+/*
+ * cam_smmu_get_iova()
+ *
+ * Look up the IOVA/length of an already-mapped non-secure ion fd on
+ * the context bank identified by @handle.  Purely a lookup: no new
+ * mapping or reference is created.  Fails with -EINVAL if the fd is
+ * not on the mapped list.
+ */
+int cam_smmu_get_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	/* non-secure lookup on a secure cb is a caller error */
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't get non-secure mem from secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	/* on a hit this fills *paddr_ptr / *len_ptr from the list entry */
+	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
+		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_get_iova);
+
+/*
+ * cam_smmu_get_stage2_iova()
+ *
+ * Secure counterpart of cam_smmu_get_iova(): look up the address and
+ * length of an already-mapped secure ion fd on the secure context bank
+ * identified by @handle.  Lookup only; no mapping is created.
+ */
+int cam_smmu_get_stage2_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	/* secure lookup requires a secure cb */
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't get secure mem from non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	/* on a hit this fills *paddr_ptr / *len_ptr from the list entry */
+	buf_state = cam_smmu_validate_secure_fd_in_list(idx,
+		ion_fd,
+		paddr_ptr,
+		len_ptr);
+
+	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
+		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_get_stage2_iova);
+
+/*
+ * cam_smmu_unmap_validate_params()
+ *
+ * Shared pre-check for the unmap paths: reject the init handle and any
+ * handle whose table index falls outside the allocated cb range.
+ */
+static int cam_smmu_unmap_validate_params(int handle)
+{
+	int table_idx;
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	table_idx = GET_SMMU_TABLE_IDX(handle);
+	if ((table_idx >= 0) && (table_idx < iommu_cb_set.cb_num))
+		return 0;
+
+	CAM_ERR(CAM_SMMU,
+		"Error: handle or index invalid. idx = %d hdl = %x",
+		table_idx, handle);
+	return -EINVAL;
+}
+
+/*
+ * cam_smmu_unmap_user_iova()
+ *
+ * Unmap a userspace ion fd previously mapped with
+ * cam_smmu_map_user_iova() and remove it from the context bank's
+ * buffer list.  Fails if the fd is not currently mapped.
+ */
+int cam_smmu_unmap_user_iova(int handle,
+	int ion_fd, enum cam_smmu_region_id region_id)
+{
+	int idx, rc;
+	struct cam_dma_buff_info *mapping_info;
+
+	rc = cam_smmu_unmap_validate_params(handle);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "unmap util validation failure");
+		return rc;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap non-secure mem from secure cb");
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Based on ion_fd & index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params idx = %d, fd = %d",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Unmapping one buffer from device */
+	CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
+	rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+
+unmap_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_user_iova);
+
+/*
+ * cam_smmu_unmap_kernel_iova()
+ *
+ * Kernel-client counterpart of cam_smmu_unmap_user_iova(): unmap a
+ * struct dma_buf previously mapped with cam_smmu_map_kernel_iova()
+ * and remove it from the context bank's buffer list.
+ */
+int cam_smmu_unmap_kernel_iova(int handle,
+	struct dma_buf *buf, enum cam_smmu_region_id region_id)
+{
+	int idx, rc;
+	struct cam_dma_buff_info *mapping_info;
+
+	rc = cam_smmu_unmap_validate_params(handle);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "unmap util validation failure");
+		return rc;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap non-secure mem from secure cb");
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Based on dma_buf & index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_dma_buf(idx, buf);
+
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params idx = %d, dma_buf = %pK",
+			idx, buf);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Unmapping one buffer from device */
+	CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
+	rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+
+unmap_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_kernel_iova);
+
+
+/*
+ * cam_smmu_put_iova()
+ *
+ * Validate that @ion_fd is currently mapped on the context bank
+ * identified by @handle.
+ *
+ * NOTE(review): despite the name, the visible body performs no "put" -
+ * it neither drops a reference nor unmaps; it only looks the fd up and
+ * returns 0 if found.  Confirm whether that is intentional.
+ */
+int cam_smmu_put_iova(int handle, int ion_fd)
+{
+	int idx;
+	int rc = 0;
+	struct cam_dma_buff_info *mapping_info;
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* based on ion fd and index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+put_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_put_iova);
+
+/*
+ * cam_smmu_destroy_handle()
+ *
+ * Tear down a client handle: flush any buffers the client left mapped,
+ * then release the handle.  Secure context banks are reference-counted
+ * (one count per cam_smmu_get_handle), so the slot is only reset when
+ * the last secure user goes away.
+ */
+int cam_smmu_destroy_handle(int handle)
+{
+	int idx;
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
+		CAM_ERR(CAM_SMMU, "UMD %s buffer list is not clean",
+			iommu_cb_set.cb_info[idx].name);
+		cam_smmu_print_user_list(idx);
+		cam_smmu_clean_user_buffer_list(idx);
+	}
+
+	if (!list_empty_careful(
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list)) {
+		CAM_ERR(CAM_SMMU, "KMD %s buffer list is not clean",
+			iommu_cb_set.cb_info[idx].name);
+		cam_smmu_print_kernel_list(idx);
+		cam_smmu_clean_kernel_buffer_list(idx);
+	}
+
+	/*
+	 * BUGFIX: the original tested "&...is_secure", i.e. the field's
+	 * address, which is always non-NULL - so every handle (secure or
+	 * not) took the secure-refcount path and non-secure handles with
+	 * secure_count == 0 were rejected with -EPERM.  Test the value.
+	 */
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		if (iommu_cb_set.cb_info[idx].secure_count == 0) {
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -EPERM;
+		}
+
+		iommu_cb_set.cb_info[idx].secure_count--;
+		/* last secure user gone: reset the slot */
+		if (iommu_cb_set.cb_info[idx].secure_count == 0) {
+			iommu_cb_set.cb_info[idx].cb_count = 0;
+			iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+		}
+
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return 0;
+	}
+
+	iommu_cb_set.cb_info[idx].cb_count = 0;
+	iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return 0;
+}
+EXPORT_SYMBOL(cam_smmu_destroy_handle);
+
+/*
+ * cam_smmu_deinit_cb()
+ *
+ * Undo cam_smmu_setup_cb(): detach the device from the IOMMU and
+ * release the IOVA mapping, the shared-memory genpool and the scratch
+ * bitmap, whichever were set up for this context bank.
+ */
+static void cam_smmu_deinit_cb(struct cam_context_bank_info *cb)
+{
+	arm_iommu_detach_device(cb->dev);
+
+	if (cb->io_support && cb->mapping) {
+		arm_iommu_release_mapping(cb->mapping);
+		cb->mapping = NULL;
+	}
+
+	if (cb->shared_support) {
+		gen_pool_destroy(cb->shared_mem_pool);
+		cb->shared_mem_pool = NULL;
+	}
+
+	if (cb->scratch_buf_support) {
+		kfree(cb->scratch_map.bitmap);
+		cb->scratch_map.bitmap = NULL;
+	}
+}
+
+/*
+ * cam_smmu_release_cb()
+ *
+ * Tear down every context bank, then release the cb_info array and
+ * reset the bank count.
+ */
+static void cam_smmu_release_cb(struct platform_device *pdev)
+{
+	int cb_idx;
+
+	for (cb_idx = 0; cb_idx < iommu_cb_set.cb_num; cb_idx++)
+		cam_smmu_deinit_cb(&iommu_cb_set.cb_info[cb_idx]);
+
+	iommu_cb_set.cb_num = 0;
+	devm_kfree(&pdev->dev, iommu_cb_set.cb_info);
+}
+
+/*
+ * cam_smmu_setup_cb()
+ *
+ * One-time setup of a context bank: create the shared-memory genpool,
+ * the scratch-region bitmap and the arm-smmu IOVA mapping, using the
+ * region info previously parsed from DT.  A context bank without an
+ * IO region is rejected.  On failure, everything created so far is
+ * released again and a negative errno is returned.
+ */
+static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
+	struct device *dev)
+{
+	int rc = 0;
+
+	if (!cb || !dev) {
+		CAM_ERR(CAM_SMMU, "Error: invalid input params");
+		return -EINVAL;
+	}
+
+	cb->dev = dev;
+	cb->is_fw_allocated = false;
+	cb->is_secheap_allocated = false;
+
+	/* Create a pool with 4K granularity for supporting shared memory */
+	if (cb->shared_support) {
+		cb->shared_mem_pool = gen_pool_create(
+			SHARED_MEM_POOL_GRANULARITY, -1);
+
+		if (!cb->shared_mem_pool)
+			return -ENOMEM;
+
+		rc = gen_pool_add(cb->shared_mem_pool,
+			cb->shared_info.iova_start,
+			cb->shared_info.iova_len,
+			-1);
+
+		CAM_DBG(CAM_SMMU, "Shared mem start->%lX",
+			(unsigned long)cb->shared_info.iova_start);
+		CAM_DBG(CAM_SMMU, "Shared mem len->%zu",
+			cb->shared_info.iova_len);
+
+		if (rc) {
+			CAM_ERR(CAM_SMMU, "Genpool chunk creation failed");
+			gen_pool_destroy(cb->shared_mem_pool);
+			cb->shared_mem_pool = NULL;
+			return rc;
+		}
+	}
+
+	if (cb->scratch_buf_support) {
+		rc = cam_smmu_init_scratch_map(&cb->scratch_map,
+			cb->scratch_info.iova_start,
+			cb->scratch_info.iova_len,
+			0);
+		if (rc < 0) {
+			CAM_ERR(CAM_SMMU,
+				"Error: failed to create scratch map");
+			rc = -ENODEV;
+			goto end;
+		}
+	}
+
+	/* create a virtual mapping */
+	if (cb->io_support) {
+		cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
+			cb->io_info.iova_start, cb->io_info.iova_len);
+		if (IS_ERR(cb->mapping)) {
+			CAM_ERR(CAM_SMMU, "Error: create mapping Failed");
+			/*
+			 * BUGFIX: clear the ERR_PTR so that a later
+			 * cam_smmu_deinit_cb() (which only NULL-checks
+			 * cb->mapping) does not hand the error pointer
+			 * to arm_iommu_release_mapping().
+			 */
+			cb->mapping = NULL;
+			rc = -ENODEV;
+			goto end;
+		}
+
+		/* page faults report instead of crashing the system */
+		iommu_cb_set.non_fatal_fault = 1;
+		if (iommu_domain_set_attr(cb->mapping->domain,
+			DOMAIN_ATTR_NON_FATAL_FAULTS,
+			&iommu_cb_set.non_fatal_fault) < 0) {
+			CAM_ERR(CAM_SMMU,
+				"Error: failed to set non fatal fault attribute");
+		}
+
+		/* the ICP context bank additionally gets IOVA guard pages */
+		if (!strcmp(cb->name, "icp")) {
+			iommu_cb_set.enable_iova_guard = 1;
+			if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
+				&iommu_cb_set.enable_iova_guard) < 0) {
+				CAM_ERR(CAM_SMMU,
+					"Failed to set iova guard pagei attr");
+			}
+		}
+	} else {
+		CAM_ERR(CAM_SMMU, "Context bank does not have IO region");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	return rc;
+end:
+	if (cb->shared_support) {
+		gen_pool_destroy(cb->shared_mem_pool);
+		cb->shared_mem_pool = NULL;
+	}
+
+	if (cb->scratch_buf_support) {
+		kfree(cb->scratch_map.bitmap);
+		cb->scratch_map.bitmap = NULL;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_alloc_smmu_context_banks()
+ *
+ * Count the context-bank child nodes under @dev (both msm-cam-smmu-cb
+ * and qsmmu-cam-cb compatibles) and allocate the cb_info table that
+ * cam_populate_smmu_context_banks() later fills in.
+ */
+static int cam_alloc_smmu_context_banks(struct device *dev)
+{
+	struct device_node *domains_child_node = NULL;
+
+	if (!dev) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid device");
+		return -ENODEV;
+	}
+
+	iommu_cb_set.cb_num = 0;
+
+	/* traverse thru all the child nodes and increment the cb count */
+	for_each_available_child_of_node(dev->of_node, domains_child_node) {
+		if (of_device_is_compatible(domains_child_node,
+			"qcom,msm-cam-smmu-cb"))
+			iommu_cb_set.cb_num++;
+
+		if (of_device_is_compatible(domains_child_node,
+			"qcom,qsmmu-cam-cb"))
+			iommu_cb_set.cb_num++;
+	}
+
+	if (iommu_cb_set.cb_num == 0) {
+		CAM_ERR(CAM_SMMU, "Error: no context banks present");
+		return -ENOENT;
+	}
+
+	/*
+	 * devm_kcalloc zero-initializes like devm_kzalloc did, and also
+	 * checks the count * size multiplication for overflow.
+	 */
+	iommu_cb_set.cb_info = devm_kcalloc(dev,
+		iommu_cb_set.cb_num, sizeof(struct cam_context_bank_info),
+		GFP_KERNEL);
+
+	if (!iommu_cb_set.cb_info) {
+		CAM_ERR(CAM_SMMU, "Error: cannot allocate context banks");
+		return -ENOMEM;
+	}
+
+	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT);
+	iommu_cb_set.cb_init_count = 0;
+
+	CAM_DBG(CAM_SMMU, "no of context banks :%d", iommu_cb_set.cb_num);
+	return 0;
+}
+
+/*
+ * cam_smmu_get_memory_regions_info()
+ *
+ * Parse the "iova-mem-map" child of a context-bank DT node and record
+ * each region (firmware/shared/scratch/io/secheap/qdss) in @cb.
+ * Secure context banks ("qcom,secure-cb") are allowed to have no
+ * memory map at all; everyone else must declare at least one region.
+ *
+ * Returns 0 on success, -EINVAL on a malformed region node, -ENODEV
+ * when the map exists but is empty.
+ */
+static int cam_smmu_get_memory_regions_info(struct device_node *of_node,
+	struct cam_context_bank_info *cb)
+{
+	int rc = 0;
+	struct device_node *mem_map_node = NULL;
+	struct device_node *child_node = NULL;
+	const char *region_name;
+	int num_regions = 0;
+
+	if (!of_node || !cb) {
+		CAM_ERR(CAM_SMMU, "Invalid argument(s)");
+		return -EINVAL;
+	}
+
+	mem_map_node = of_get_child_by_name(of_node, "iova-mem-map");
+	cb->is_secure = of_property_read_bool(of_node, "qcom,secure-cb");
+
+	/*
+	 * We always expect a memory map node, except when it is a secure
+	 * context bank.
+	 */
+	if (!mem_map_node) {
+		if (cb->is_secure)
+			return 0;
+		CAM_ERR(CAM_SMMU, "iova-mem-map not present");
+		return -EINVAL;
+	}
+
+	for_each_available_child_of_node(mem_map_node, child_node) {
+		uint32_t region_start;
+		uint32_t region_len;
+		uint32_t region_id;
+		uint32_t qdss_region_phy_addr = 0;
+
+		num_regions++;
+		/*
+		 * Each error path below drops the mem_map_node reference
+		 * taken by of_get_child_by_name() before returning.
+		 */
+		rc = of_property_read_string(child_node,
+			"iova-region-name", &region_name);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			CAM_ERR(CAM_SMMU, "IOVA region not found");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-start", &region_start);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-start");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-len", &region_len);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-len");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-id", &region_id);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-id");
+			return -EINVAL;
+		}
+
+		/* QDSS regions additionally carry a fixed physical address */
+		if (strcmp(region_name, qdss_region_name) == 0) {
+			rc = of_property_read_u32(child_node,
+				"qdss-phy-addr", &qdss_region_phy_addr);
+			if (rc < 0) {
+				of_node_put(mem_map_node);
+				CAM_ERR(CAM_SMMU,
+					"Failed to read qdss phy addr");
+				return -EINVAL;
+			}
+		}
+
+		/* record the region; an unknown id is logged but tolerated */
+		switch (region_id) {
+		case CAM_SMMU_REGION_FIRMWARE:
+			cb->firmware_support = 1;
+			cb->firmware_info.iova_start = region_start;
+			cb->firmware_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_SHARED:
+			cb->shared_support = 1;
+			cb->shared_info.iova_start = region_start;
+			cb->shared_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_SCRATCH:
+			cb->scratch_buf_support = 1;
+			cb->scratch_info.iova_start = region_start;
+			cb->scratch_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_IO:
+			cb->io_support = 1;
+			cb->io_info.iova_start = region_start;
+			cb->io_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_SECHEAP:
+			cb->secheap_support = 1;
+			cb->secheap_info.iova_start = region_start;
+			cb->secheap_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_QDSS:
+			cb->qdss_support = 1;
+			cb->qdss_info.iova_start = region_start;
+			cb->qdss_info.iova_len = region_len;
+			cb->qdss_phy_addr = qdss_region_phy_addr;
+			break;
+		default:
+			CAM_ERR(CAM_SMMU,
+				"Incorrect region id present in DT file: %d",
+				region_id);
+		}
+
+		CAM_DBG(CAM_SMMU, "Found label -> %s", cb->name);
+		CAM_DBG(CAM_SMMU, "Found region -> %s", region_name);
+		CAM_DBG(CAM_SMMU, "region_start -> %X", region_start);
+		CAM_DBG(CAM_SMMU, "region_len -> %X", region_len);
+		CAM_DBG(CAM_SMMU, "region_id -> %X", region_id);
+	}
+	of_node_put(mem_map_node);
+
+	if (!num_regions) {
+		CAM_ERR(CAM_SMMU,
+			"No memory regions found, at least one needed");
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_populate_smmu_context_banks()
+ *
+ * Fill in the next free cb_info slot for @dev: read its DT label and
+ * region map, then (for non-secure banks) set up the IOMMU mapping and
+ * install the page-fault handler.  Secure banks and QSMMU-type banks
+ * skip the mapping setup (the latter is rejected as unsupported).
+ */
+static int cam_populate_smmu_context_banks(struct device *dev,
+	enum cam_iommu_type type)
+{
+	int rc = 0;
+	struct cam_context_bank_info *cb;
+	struct device *ctx = NULL;
+
+	if (!dev) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid device");
+		return -ENODEV;
+	}
+
+	/* check the bounds */
+	if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU, "Error: populate more than allocated cb");
+		rc = -EBADHANDLE;
+		goto cb_init_fail;
+	}
+
+	/* read the context bank from cb set */
+	cb = &iommu_cb_set.cb_info[iommu_cb_set.cb_init_count];
+
+	/* set the name of the context bank */
+	rc = of_property_read_string(dev->of_node, "label", &cb->name);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU,
+			"Error: failed to read label from sub device");
+		goto cb_init_fail;
+	}
+
+	rc = cam_smmu_get_memory_regions_info(dev->of_node,
+		cb);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: Getting region info");
+		return rc;
+	}
+
+	/* secure banks get no IOMMU mapping here; just claim the slot */
+	if (cb->is_secure) {
+		/* increment count to next bank */
+		iommu_cb_set.cb_init_count++;
+		return 0;
+	}
+
+	/* set up the iommu mapping for the context bank */
+	if (type == CAM_QSMMU) {
+		CAM_ERR(CAM_SMMU, "Error: QSMMU ctx not supported for : %s",
+			cb->name);
+		return -ENODEV;
+	}
+
+	ctx = dev;
+	CAM_DBG(CAM_SMMU, "getting Arm SMMU ctx : %s", cb->name);
+
+	rc = cam_smmu_setup_cb(cb, ctx);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s", cb->name);
+		goto cb_init_fail;
+	}
+	if (cb->io_support && cb->mapping)
+		iommu_set_fault_handler(cb->mapping->domain,
+			cam_smmu_iommu_fault_handler,
+			(void *)cb->name);
+	/* increment count to next bank */
+	iommu_cb_set.cb_init_count++;
+
+	CAM_DBG(CAM_SMMU, "X: cb init count :%d", iommu_cb_set.cb_init_count);
+
+cb_init_fail:
+	return rc;
+}
+
+/*
+ * cam_smmu_probe()
+ *
+ * Shared probe for every compatible in msm_cam_smmu_dt_match.  The
+ * parent "qcom,msm-cam-smmu" node allocates the cb table, initializes
+ * the page-fault worker state and recursively populates its children;
+ * each child compatible fills in (or, for the fw-dev node, records)
+ * its own piece of state.
+ */
+static int cam_smmu_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct device *dev = &pdev->dev;
+
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
+		rc = cam_alloc_smmu_context_banks(dev);
+		if (rc < 0) {
+			CAM_ERR(CAM_SMMU, "Error: allocating context banks");
+			return -ENOMEM;
+		}
+	}
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) {
+		rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
+		if (rc < 0) {
+			CAM_ERR(CAM_SMMU, "Error: populating context banks");
+			cam_smmu_release_cb(pdev);
+			return -ENOMEM;
+		}
+		return rc;
+	}
+	if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) {
+		rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU);
+		if (rc < 0) {
+			CAM_ERR(CAM_SMMU, "Error: populating context banks");
+			return -ENOMEM;
+		}
+		return rc;
+	}
+
+	/* firmware device node: just remember the device for later use */
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-fw-dev")) {
+		icp_fw.fw_dev = &pdev->dev;
+		icp_fw.fw_kva = NULL;
+		icp_fw.fw_dma_hdl = 0;
+		return rc;
+	}
+
+	/* probe through all the subdevices */
+	rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match,
+				NULL, &pdev->dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: populating devices");
+	} else {
+		INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
+		mutex_init(&iommu_cb_set.payload_list_lock);
+		INIT_LIST_HEAD(&iommu_cb_set.payload_list);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_smmu_remove()
+ *
+ * Driver teardown: reset the handle table, then release the context
+ * banks if this is the parent msm-cam-smmu node.
+ */
+static int cam_smmu_remove(struct platform_device *pdev)
+{
+	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_DEINIT);
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cam-smmu"))
+		cam_smmu_release_cb(pdev);
+
+	return 0;
+}
+
+/* Platform driver covering all compatibles in msm_cam_smmu_dt_match */
+static struct platform_driver cam_smmu_driver = {
+	.probe = cam_smmu_probe,
+	.remove = cam_smmu_remove,
+	.driver = {
+		.name = "msm_cam_smmu",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_smmu_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+/*
+ * module_platform_driver() expands to exactly the register/unregister
+ * init/exit pair the previous hand-written cam_smmu_init_module() /
+ * cam_smmu_exit_module() implemented, at the same initcall level.
+ */
+module_platform_driver(cam_smmu_driver);
+MODULE_DESCRIPTION("MSM Camera SMMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera_v3/cam_smmu/cam_smmu_api.h
new file mode 100644
index 000000000000..caf326d6c716
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_smmu/cam_smmu_api.h
@@ -0,0 +1,392 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SMMU_API_H_
+#define _CAM_SMMU_API_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+
+/* Enum for possible CAM SMMU operations */
+enum cam_smmu_ops_param {
+ CAM_SMMU_ATTACH,
+ CAM_SMMU_DETACH,
+ CAM_SMMU_VOTE,
+ CAM_SMMU_DEVOTE,
+ CAM_SMMU_OPS_INVALID
+};
+
+enum cam_smmu_map_dir {
+ CAM_SMMU_MAP_READ,
+ CAM_SMMU_MAP_WRITE,
+ CAM_SMMU_MAP_RW,
+ CAM_SMMU_MAP_INVALID
+};
+
+enum cam_smmu_region_id {
+ CAM_SMMU_REGION_FIRMWARE,
+ CAM_SMMU_REGION_SHARED,
+ CAM_SMMU_REGION_SCRATCH,
+ CAM_SMMU_REGION_IO,
+ CAM_SMMU_REGION_SECHEAP,
+ CAM_SMMU_REGION_QDSS
+};
+
+/**
+ * @brief : Callback function type that gets called back on cam
+ * smmu page fault.
+ *
+ * @param domain : Iommu domain received in iommu page fault handler
+ * @param dev : Device received in iommu page fault handler
+ * @param iova : IOVA where page fault occurred
+ * @param flags : Flags received in iommu page fault handler
+ * @param token : Userdata given during callback registration
+ * @param buf_info : Closest mapped buffer info
+ */
+typedef void (*cam_smmu_client_page_fault_handler)(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token,
+ uint32_t buf_info);
+
+/**
+ * @brief : Structure to store region information
+ *
+ * @param iova_start : Start address of region
+ * @param iova_len : length of region
+ */
+struct cam_smmu_region_info {
+ dma_addr_t iova_start;
+ size_t iova_len;
+};
+
+/**
+ * @brief : Gets an smmu handle
+ *
+ * @param identifier: Unique identifier to be used by clients which they
+ * should get from device tree. CAM SMMU driver will
+ * not enforce how this string is obtained and will
+ * only validate this against the list of permitted
+ * identifiers
+ * @param handle_ptr: Based on the identifier, CAM SMMU driver will
+ *                    fill the handle pointed to by handle_ptr
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_handle(char *identifier, int *handle_ptr);
+
+/**
+ * @brief : Performs IOMMU operations
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param op : Operation to be performed. Can be either CAM_SMMU_ATTACH
+ * or CAM_SMMU_DETACH
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
+
+/**
+ * @brief : Maps user space IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ * @dir : Mapping direction, which will translate to DMA_BIDIRECTIONAL,
+ * DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @dma_addr : Pointer to physical address where mapped address will be
+ * returned if region_id is CAM_SMMU_REGION_IO. If region_id is
+ * CAM_SMMU_REGION_SHARED, dma_addr is used as an input parameter
+ * which specifies the cpu virtual address to map.
+ * @len_ptr : Length of buffer mapped returned by CAM SMMU driver.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_user_iova(int handle,
+ int ion_fd, enum cam_smmu_map_dir dir,
+ dma_addr_t *dma_addr, size_t *len_ptr,
+ enum cam_smmu_region_id region_id);
+
+/**
+ * @brief : Maps kernel space IOVA for calling driver
+ *
+ * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param buf : dma_buf allocated for kernel usage in mem_mgr
+ * @dir : Mapping direction, which will translate to DMA_BIDIRECTIONAL,
+ * DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @dma_addr : Pointer to physical address where mapped address will be
+ * returned if region_id is CAM_SMMU_REGION_IO. If region_id is
+ * CAM_SMMU_REGION_SHARED, dma_addr is used as an input
+ * parameter which specifies the cpu virtual address to map.
+ * @len_ptr : Length of buffer mapped returned by CAM SMMU driver.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_kernel_iova(int handle,
+ struct dma_buf *buf, enum cam_smmu_map_dir dir,
+ dma_addr_t *dma_addr, size_t *len_ptr,
+ enum cam_smmu_region_id region_id);
+
+/**
+ * @brief : Unmaps user space IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_user_iova(int handle,
+ int ion_fd, enum cam_smmu_region_id region_id);
+
+/**
+ * @brief : Unmaps kernel IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
+ * @param buf : dma_buf allocated for the kernel
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_kernel_iova(int handle,
+ struct dma_buf *buf, enum cam_smmu_region_id region_id);
+
+/**
+ * @brief : Allocates a scratch buffer
+ *
+ * This function allocates a scratch virtual buffer of length virt_len in the
+ * device virtual address space mapped to phys_len physically contiguous bytes
+ * in that device's SMMU.
+ *
+ * virt_len and phys_len are expected to be aligned to PAGE_SIZE and with each
+ * other, otherwise -EINVAL is returned.
+ *
+ * -EINVAL will be returned if virt_len is less than phys_len.
+ *
+ * Passing a too large phys_len might also cause failure if that much size is
+ * not available for allocation in a physically contiguous way.
+ *
+ * @param handle : Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
+ * @param dir : Direction of mapping which will translate to IOMMU_READ
+ * IOMMU_WRITE or a bit mask of both.
+ * @param paddr_ptr: Device virtual address that the client device will be
+ * able to read from/write to
+ * @param virt_len : Virtual length of the scratch buffer
+ * @param phys_len : Physical length of the scratch buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_get_scratch_iova(int handle,
+ enum cam_smmu_map_dir dir,
+ dma_addr_t *paddr_ptr,
+ size_t virt_len,
+ size_t phys_len);
+
+/**
+ * @brief : Frees a scratch buffer
+ *
+ * This function frees a scratch buffer and releases the corresponding SMMU
+ * mappings.
+ *
+ * @param handle : Handle to identify the CAMSMMU client (IFE, ICP, etc.)
+ * @param paddr : Device virtual address of client's scratch buffer that
+ * will be freed.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_put_scratch_iova(int handle,
+ dma_addr_t paddr);
+
+/**
+ *@brief : Destroys an smmu handle
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_destroy_handle(int handle);
+
+/**
+ * @brief : Finds index by handle in the smmu client table
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @return Index of SMMU client. Negative in case of error.
+ */
+int cam_smmu_find_index_by_handle(int hdl);
+
+/**
+ * @brief : Registers smmu fault handler for client
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param handler_cb: It is triggered in IOMMU page fault
+ * @param token: It is input param when trigger page fault handler
+ */
+void cam_smmu_set_client_page_fault_handler(int handle,
+ cam_smmu_client_page_fault_handler handler_cb, void *token);
+
+/**
+ * @brief : Unregisters smmu fault handler for client
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param token: It is input param when trigger page fault handler
+ */
+void cam_smmu_unset_client_page_fault_handler(int handle, void *token);
+
+/**
+ * @brief Maps memory from an ION fd into IOVA space
+ *
+ * @param handle: SMMU handle identifying the context bank to map to
+ * @param ion_fd: ION fd of memory to map to
+ * @param paddr_ptr: Pointer IOVA address that will be returned
+ * @param len_ptr: Length of memory mapped
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_iova(int handle, int ion_fd,
+ dma_addr_t *paddr_ptr, size_t *len_ptr);
+
+/**
+ * @brief Maps memory from an ION fd into IOVA space
+ *
+ * @param handle: SMMU handle identifying the secure context bank to map to
+ * @param ion_fd: ION fd of memory to map to
+ * @param paddr_ptr: Pointer IOVA address that will be returned
+ * @param len_ptr: Length of memory mapped
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_stage2_iova(int handle, int ion_fd,
+ dma_addr_t *paddr_ptr, size_t *len_ptr);
+/**
+ * @brief Unmaps memory from context bank
+ *
+ * @param handle: SMMU handle identifying the context bank
+ * @param ion_fd: ION fd of memory to unmap
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_put_iova(int handle, int ion_fd);
+
+/**
+ * @brief Maps secure memory for SMMU handle
+ *
+ * @param handle: SMMU handle identifying secure context bank
+ * @param ion_fd: ION fd to map securely
+ * @param dir: DMA Direction for the mapping
+ * @param client: Ion client passed by caller
+ * @param dma_addr: Returned IOVA address after mapping
+ * @param len_ptr: Length of memory mapped
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_stage2_iova(int handle,
+ int ion_fd, enum cam_smmu_map_dir dir, struct ion_client *client,
+ ion_phys_addr_t *dma_addr, size_t *len_ptr);
+
+/**
+ * @brief Unmaps secure memory for SMMU handle
+ *
+ * @param handle: SMMU handle identifying secure context bank
+ * @param ion_fd: ION fd to unmap
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd);
+
+
+/**
+ * @brief Allocates firmware for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying context bank
+ * @param iova: IOVA address of allocated firmware
+ * @param kvaddr: CPU mapped address of allocated firmware
+ * @param len: Length of allocated firmware memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_alloc_firmware(int32_t smmu_hdl,
+ dma_addr_t *iova,
+ uintptr_t *kvaddr,
+ size_t *len);
+
+/**
+ * @brief Deallocates firmware memory for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_dealloc_firmware(int32_t smmu_hdl);
+
+/**
+ * @brief Gets region information specified by smmu handle and region id
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ * @param region_id: Region id for which information is desired
+ * @param region_info: Struct populated with region information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_region_info(int32_t smmu_hdl,
+ enum cam_smmu_region_id region_id,
+ struct cam_smmu_region_info *region_info);
+
+/**
+ * @brief Reserves secondary heap
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ * @param iova: IOVA of secondary heap after reservation has completed
+ * @param buf: Allocated dma_buf for secondary heap
+ * @param request_len: Length of secondary heap after reservation has completed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
+ struct dma_buf *buf,
+ dma_addr_t *iova,
+ size_t *request_len);
+
+/**
+ * @brief Releases secondary heap
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_release_sec_heap(int32_t smmu_hdl);
+
+/**
+ * @brief Allocates qdss for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying context bank
+ * @param iova: IOVA address of allocated qdss
+ * @param len: Length of allocated qdss memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_alloc_qdss(int32_t smmu_hdl,
+ dma_addr_t *iova,
+ size_t *len);
+
+/**
+ * @brief Deallocates qdss memory for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_dealloc_qdss(int32_t smmu_hdl);
+
+#endif /* _CAM_SMMU_API_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sync/Makefile b/drivers/media/platform/msm/camera_v3/cam_sync/Makefile
new file mode 100644
index 000000000000..b654541dc58a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sync/Makefile
@@ -0,0 +1,3 @@
+# Build the camera sync (fence) objects when SPECTRA_CAMERA is enabled;
+# cam_utils is added to the include path for the shared debug helpers.
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync.o cam_sync_util.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync.c
new file mode 100644
index 000000000000..7a772f1c4269
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync.c
@@ -0,0 +1,1093 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include "cam_sync_util.h"
+#include "cam_debug_util.h"
+#include "cam_common_util.h"
+
+struct sync_device *sync_dev;
+
+/*
+ * Flag to determine whether to enqueue cb of a
+ * signaled fence onto the workq or invoke it
+ * directly in the same context
+ */
+static bool trigger_cb_without_switch;
+
+/*
+ * Allocate a new individual sync object: claim a free slot in the
+ * global bitmap (looping on test_and_set_bit to survive races with
+ * concurrent creators) and initialize the matching sync_table row
+ * under its per-row spinlock. On success *sync_obj holds the new id.
+ * Returns 0, -ENOMEM when the table is full, -EINVAL on row-init
+ * failure.
+ */
+int cam_sync_create(int32_t *sync_obj, const char *name)
+{
+	int rc;
+	long idx;
+	bool bit;
+
+	do {
+		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+		if (idx >= CAM_SYNC_MAX_OBJS)
+			return -ENOMEM;	/* no free slots */
+		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
+		bit = test_and_set_bit(idx, sync_dev->bitmap);
+	} while (bit);	/* lost the slot to a racing creator; retry */
+
+	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
+		CAM_SYNC_TYPE_INDV);
+	if (rc) {
+		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
+			idx);
+		/* give the slot back on failure */
+		clear_bit(idx, sync_dev->bitmap);
+		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+		return -EINVAL;
+	}
+
+	*sync_obj = idx;
+	CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
+	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+
+	return rc;
+}
+
+/*
+ * Register @cb_func/@userdata to run when @sync_obj signals. If the
+ * object is already signaled (and has no children still pending) the
+ * callback is dispatched right away: inline when
+ * trigger_cb_without_switch is set, otherwise through the workqueue.
+ * Otherwise the callback is queued on the row's callback_list for
+ * cam_sync_signal() to dispatch later.
+ * Returns 0, -EINVAL for bad args or an uninitialized object,
+ * -ENOMEM on allocation failure.
+ */
+int cam_sync_register_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj)
+{
+	struct sync_callback_info *sync_cb;
+	struct sync_table_row *row = NULL;
+	int status = 0;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
+		return -EINVAL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj %d",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -EINVAL;
+	}
+
+	/* GFP_ATOMIC: allocated while holding the row spinlock (bh) */
+	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
+	if (!sync_cb) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -ENOMEM;
+	}
+
+	/* Trigger callback if sync object is already in SIGNALED state */
+	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
+		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) &&
+		(!row->remaining)) {
+		if (trigger_cb_without_switch) {
+			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
+				sync_obj);
+			status = row->state;
+			kfree(sync_cb);
+			/* drop the lock before calling out to the client */
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+			cb_func(sync_obj, status, userdata);
+		} else {
+			sync_cb->callback_func = cb_func;
+			sync_cb->cb_data = userdata;
+			sync_cb->sync_obj = sync_obj;
+			INIT_WORK(&sync_cb->cb_dispatch_work,
+				cam_sync_util_cb_dispatch);
+			sync_cb->status = row->state;
+			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
+				sync_cb->sync_obj);
+			queue_work(sync_dev->work_queue,
+				&sync_cb->cb_dispatch_work);
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		}
+
+		return 0;
+	}
+
+	/* not yet signaled: park the callback on the row's list */
+	sync_cb->callback_func = cb_func;
+	sync_cb->cb_data = userdata;
+	sync_cb->sync_obj = sync_obj;
+	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
+	list_add_tail(&sync_cb->list, &row->callback_list);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
+	return 0;
+}
+
+/*
+ * Remove every pending callback on @sync_obj that matches both
+ * @cb_func and @userdata. Returns 0 if at least one entry was removed,
+ * -ENOENT when no match was found, -EINVAL for a bad or uninitialized
+ * object.
+ */
+int cam_sync_deregister_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj)
+{
+	struct sync_table_row *row = NULL;
+	struct sync_callback_info *sync_cb, *temp;
+	bool found = false;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
+		sync_obj);
+	/* _safe variant: entries are freed while walking the list */
+	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
+		if (sync_cb->callback_func == cb_func &&
+			sync_cb->cb_data == userdata) {
+			list_del_init(&sync_cb->list);
+			kfree(sync_cb);
+			found = true;
+		}
+	}
+
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return found ? 0 : -ENOENT;
+}
+
+/*
+ * Signal an individual (non-GROUP) sync object with a SUCCESS/ERROR
+ * status. After validation, the signal only takes effect once the
+ * object's ref_cnt drops to zero; then the row state is updated, the
+ * object's own callbacks/user payloads are dispatched, and the state
+ * is propagated to all parent (merged) objects -- each parent's
+ * callbacks fire once none of its children remain pending.
+ * Returns 0, -EINVAL on bad object/state/status, -EALREADY if the
+ * object was already signaled.
+ */
+int cam_sync_signal(int32_t sync_obj, uint32_t status)
+{
+	struct sync_table_row *row = NULL;
+	struct sync_table_row *parent_row = NULL;
+	struct sync_parent_info *parent_info, *temp_parent_info;
+	struct list_head parents_list;
+	int rc = 0;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
+		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
+			sync_obj, CAM_SYNC_MAX_OBJS);
+		return -EINVAL;
+	}
+	row = sync_dev->sync_table + sync_obj;
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	/* GROUP objects are signaled implicitly via their children */
+	if (row->type == CAM_SYNC_TYPE_GROUP) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: Signaling a GROUP sync object = %d",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	if (row->state != CAM_SYNC_STATE_ACTIVE) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: Sync object already signaled sync_obj = %d",
+			sync_obj);
+		return -EALREADY;
+	}
+
+	if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
+		status != CAM_SYNC_STATE_SIGNALED_ERROR) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: signaling with undefined status = %d",
+			status);
+		return -EINVAL;
+	}
+
+	/* only the last reference actually performs the signal */
+	if (!atomic_dec_and_test(&row->ref_cnt)) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return 0;
+	}
+
+	row->state = status;
+	cam_sync_util_dispatch_signaled_cb(sync_obj, status);
+
+	/* copy parent list to local and release child lock */
+	INIT_LIST_HEAD(&parents_list);
+	list_splice_init(&row->parents_list, &parents_list);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
+	if (list_empty(&parents_list))
+		return 0;
+
+	/*
+	 * Now iterate over all parents of this object and if they too need to
+	 * be signaled dispatch cb's
+	 */
+	list_for_each_entry_safe(parent_info,
+		temp_parent_info,
+		&parents_list,
+		list) {
+		parent_row = sync_dev->sync_table + parent_info->sync_id;
+		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+		parent_row->remaining--;
+
+		rc = cam_sync_util_update_parent_state(
+			parent_row,
+			status);
+		if (rc) {
+			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
+				parent_row->state);
+			spin_unlock_bh(
+				&sync_dev->row_spinlocks[parent_info->sync_id]);
+			kfree(parent_info);
+			continue;
+		}
+
+		/* parent is fully resolved once no children are pending */
+		if (!parent_row->remaining)
+			cam_sync_util_dispatch_signaled_cb(
+				parent_info->sync_id, parent_row->state);
+
+		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+		list_del_init(&parent_info->list);
+		kfree(parent_info);
+	}
+
+	return 0;
+}
+
+/*
+ * Create a GROUP sync object that resolves once all of the @num_objs
+ * objects in @sync_obj have signaled. Duplicate fences in the input
+ * list are rejected. The new object's id is returned via @merged_obj.
+ * Returns 0, -EINVAL on bad input, -ENOMEM when the table is full.
+ */
+int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
+{
+	int rc;
+	long idx = 0;
+	bool bit;
+
+	if (!sync_obj || !merged_obj) {
+		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
+		return -EINVAL;
+	}
+
+	if (num_objs <= 1) {
+		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
+		return -EINVAL;
+	}
+
+	/* a shrunken count after dedup means the caller passed duplicates */
+	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
+		!= num_objs) {
+		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
+		return -EINVAL;
+	}
+
+	do {
+		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+		if (idx >= CAM_SYNC_MAX_OBJS)
+			return -ENOMEM;
+		bit = test_and_set_bit(idx, sync_dev->bitmap);
+	} while (bit);	/* retry if another creator won the slot */
+
+	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+	rc = cam_sync_init_group_object(sync_dev->sync_table,
+		idx, sync_obj,
+		num_objs);
+	if (rc < 0) {
+		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
+			idx);
+		clear_bit(idx, sync_dev->bitmap);
+		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+		return -EINVAL;
+	}
+	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
+	*merged_obj = idx;
+	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+
+	return 0;
+}
+
+/*
+ * Take an extra signal reference on an ACTIVE sync object; each
+ * reference requires one cam_sync_signal() call before the object
+ * actually signals. Fails with -EINVAL for out-of-range ids or
+ * objects not in the ACTIVE state.
+ */
+int cam_sync_get_obj_ref(int32_t sync_obj)
+{
+	struct sync_table_row *row = NULL;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	row = sync_dev->sync_table + sync_obj;
+
+	spin_lock(&sync_dev->row_spinlocks[sync_obj]);
+
+	if (row->state != CAM_SYNC_STATE_ACTIVE) {
+		spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	atomic_inc(&row->ref_cnt);
+	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
+	CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);
+
+	return 0;
+}
+
+/*
+ * Drop one signal reference on @sync_obj (counterpart of
+ * cam_sync_get_obj_ref). No state check is performed here; only the
+ * id range is validated.
+ */
+int cam_sync_put_obj_ref(int32_t sync_obj)
+{
+	struct sync_table_row *row = NULL;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	row = sync_dev->sync_table + sync_obj;
+	atomic_dec(&row->ref_cnt);
+	CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);
+
+	return 0;
+}
+
+/*
+ * Destroy a sync object: delegates row teardown (and slot release)
+ * to cam_sync_deinit_object and returns its status.
+ */
+int cam_sync_destroy(int32_t sync_obj)
+{
+	CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
+	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
+}
+
+/*
+ * Block up to @timeout_ms for @sync_obj to signal. Returns 0 only on
+ * SIGNALED_SUCCESS; -ETIMEDOUT on timeout; -EINVAL for a bad object
+ * or any other terminal state (including SIGNALED_ERROR).
+ */
+int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
+{
+	unsigned long timeleft;
+	int rc = -EINVAL;
+	struct sync_table_row *row = NULL;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	timeleft = wait_for_completion_timeout(&row->signaled,
+		msecs_to_jiffies(timeout_ms));
+
+	if (!timeleft) {
+		CAM_ERR(CAM_SYNC,
+			"Error: timed out for sync obj = %d", sync_obj);
+		rc = -ETIMEDOUT;
+	} else {
+		/* completion fired: map the row's final state to a result */
+		switch (row->state) {
+		case CAM_SYNC_STATE_INVALID:
+		case CAM_SYNC_STATE_ACTIVE:
+		case CAM_SYNC_STATE_SIGNALED_ERROR:
+			CAM_ERR(CAM_SYNC,
+				"Error: Wait on invalid state = %d, obj = %d",
+				row->state, sync_obj);
+			rc = -EINVAL;
+			break;
+		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
+			rc = 0;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Ioctl helper for CAM_SYNC_CREATE: validate the embedded arg size,
+ * copy cam_sync_info from userspace, create the object, and copy the
+ * struct (now holding the new sync_obj id) back to the caller.
+ */
+static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_info sync_create;
+	int result;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_info))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_create,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	result = cam_sync_create(&sync_create.sync_obj,
+		sync_create.name);
+
+	/* return the allocated id to userspace only on success */
+	if (!result)
+		if (copy_to_user(
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&sync_create,
+			k_ioctl->size))
+			return -EFAULT;
+
+	return result;
+}
+
+/*
+ * Ioctl helper for CAM_SYNC_SIGNAL: copy cam_sync_signal from
+ * userspace and signal the object with the requested state.
+ */
+static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_signal sync_signal;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_signal))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_signal,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	/* need to get ref for UMD signaled fences */
+	cam_sync_get_obj_ref(sync_signal.sync_obj);
+	return cam_sync_signal(sync_signal.sync_obj,
+		sync_signal.sync_state);
+}
+
+/*
+ * Ioctl helper for CAM_SYNC_MERGE: copy the cam_sync_merge header and
+ * the caller's fence-id array from userspace (bounded by
+ * CAM_SYNC_MAX_OBJS), merge them into a GROUP object, and copy the
+ * header (holding the merged id) back. The temporary id array is
+ * freed on all exit paths.
+ */
+static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_merge sync_merge;
+	uint32_t *sync_objs;
+	uint32_t num_objs;
+	uint32_t size;
+	int result;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_merge))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_merge,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	/* bound the user-supplied count before sizing the allocation */
+	if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
+		return -EINVAL;
+
+	size = sizeof(uint32_t) * sync_merge.num_objs;
+	sync_objs = kzalloc(size, GFP_ATOMIC);
+
+	if (!sync_objs)
+		return -ENOMEM;
+
+	if (copy_from_user(sync_objs,
+		u64_to_user_ptr(sync_merge.sync_objs),
+		sizeof(uint32_t) * sync_merge.num_objs)) {
+		kfree(sync_objs);
+		return -EFAULT;
+	}
+
+	num_objs = sync_merge.num_objs;
+
+	result = cam_sync_merge(sync_objs,
+		num_objs,
+		&sync_merge.merged);
+
+	if (!result)
+		if (copy_to_user(
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&sync_merge,
+			k_ioctl->size)) {
+			kfree(sync_objs);
+			return -EFAULT;
+		}
+
+	kfree(sync_objs);
+
+	return result;
+}
+
+/*
+ * Ioctl helper for CAM_SYNC_WAIT: copy cam_sync_wait from userspace
+ * and block on the object. The wait status is reported through
+ * k_ioctl->result (copied back by the ioctl dispatcher), while the
+ * function itself returns 0 unless the copy fails.
+ */
+static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_wait sync_wait;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_wait))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_wait,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
+		sync_wait.timeout_ms);
+
+	return 0;
+}
+
+/*
+ * Ioctl helper for CAM_SYNC_DESTROY: copy cam_sync_info from
+ * userspace and destroy the referenced sync object.
+ */
+static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_info sync_create;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_info))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_create,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	return cam_sync_destroy(sync_create.sync_obj);
+}
+
+/*
+ * Ioctl helper for CAM_SYNC_REGISTER_PAYLOAD: attach a userspace
+ * payload to a sync object. If the object is already signaled, the
+ * payload is delivered immediately via a v4l2 event instead of being
+ * queued. Duplicate payloads (same first two words) are rejected
+ * with -EALREADY.
+ */
+static int cam_sync_handle_register_user_payload(
+	struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_userpayload_info userpayload_info;
+	struct sync_user_payload *user_payload_kernel;
+	struct sync_user_payload *user_payload_iter;
+	struct sync_user_payload *temp_upayload_kernel;
+	uint32_t sync_obj;
+	struct sync_table_row *row = NULL;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&userpayload_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	sync_obj = userpayload_info.sync_obj;
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
+	if (!user_payload_kernel)
+		return -ENOMEM;
+
+	memcpy(user_payload_kernel->payload_data,
+		userpayload_info.payload,
+		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		kfree(user_payload_kernel);
+		return -EINVAL;
+	}
+
+	/* already signaled: deliver the payload right away via v4l2 event */
+	if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
+		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
+
+		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
+			sync_obj,
+			row->state,
+			user_payload_kernel->payload_data,
+			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64));
+
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		kfree(user_payload_kernel);
+		return 0;
+	}
+
+	/* reject a payload that is already queued on this object */
+	list_for_each_entry_safe(user_payload_iter,
+		temp_upayload_kernel,
+		&row->user_payload_list,
+		list) {
+		if (user_payload_iter->payload_data[0] ==
+				user_payload_kernel->payload_data[0] &&
+			user_payload_iter->payload_data[1] ==
+				user_payload_kernel->payload_data[1]) {
+
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+			kfree(user_payload_kernel);
+			return -EALREADY;
+		}
+	}
+
+	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return 0;
+}
+
+/*
+ * Ioctl helper for CAM_SYNC_DEREGISTER_PAYLOAD: remove any queued
+ * user payloads on a sync object whose first two words match the
+ * payload supplied from userspace. Succeeds even when nothing
+ * matched.
+ */
+static int cam_sync_handle_deregister_user_payload(
+	struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_userpayload_info userpayload_info;
+	struct sync_user_payload *user_payload_kernel, *temp;
+	uint32_t sync_obj;
+	struct sync_table_row *row = NULL;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
+		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
+		return -EINVAL;
+	}
+
+	if (!k_ioctl->ioctl_ptr) {
+		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&userpayload_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	sync_obj = userpayload_info.sync_obj;
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -EINVAL;
+	}
+
+	/* _safe variant: matching entries are freed during the walk */
+	list_for_each_entry_safe(user_payload_kernel, temp,
+		&row->user_payload_list, list) {
+		if (user_payload_kernel->payload_data[0] ==
+				userpayload_info.payload[0] &&
+			user_payload_kernel->payload_data[1] ==
+				userpayload_info.payload[1]) {
+			list_del_init(&user_payload_kernel->list);
+			kfree(user_payload_kernel);
+		}
+	}
+
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return 0;
+}
+
+/*
+ * v4l2 private-ioctl dispatcher: validates the CAM_PRIVATE_IOCTL_CMD
+ * envelope, copies the embedded cam_private_ioctl_arg, and routes to
+ * the per-command handler. For CAM_SYNC_WAIT the handler's result is
+ * written back into the caller's arg.
+ */
+static long cam_sync_dev_ioctl(struct file *filep, void *fh,
+		bool valid_prio, unsigned int cmd, void *arg)
+{
+	int32_t rc;
+	struct sync_device *sync_dev = video_drvdata(filep);
+	struct cam_private_ioctl_arg k_ioctl;
+
+	if (!sync_dev) {
+		CAM_ERR(CAM_SYNC, "sync_dev NULL");
+		return -EINVAL;
+	}
+
+	if (!arg)
+		return -EINVAL;
+
+	if (cmd != CAM_PRIVATE_IOCTL_CMD)
+		return -ENOIOCTLCMD;
+
+	/* work on a local copy; only WAIT writes anything back */
+	k_ioctl = *(struct cam_private_ioctl_arg *)arg;
+
+	switch (k_ioctl.id) {
+	case CAM_SYNC_CREATE:
+		rc = cam_sync_handle_create(&k_ioctl);
+		break;
+	case CAM_SYNC_DESTROY:
+		rc = cam_sync_handle_destroy(&k_ioctl);
+		break;
+	case CAM_SYNC_REGISTER_PAYLOAD:
+		rc = cam_sync_handle_register_user_payload(
+			&k_ioctl);
+		break;
+	case CAM_SYNC_DEREGISTER_PAYLOAD:
+		rc = cam_sync_handle_deregister_user_payload(
+			&k_ioctl);
+		break;
+	case CAM_SYNC_SIGNAL:
+		rc = cam_sync_handle_signal(&k_ioctl);
+		break;
+	case CAM_SYNC_MERGE:
+		rc = cam_sync_handle_merge(&k_ioctl);
+		break;
+	case CAM_SYNC_WAIT:
+		rc = cam_sync_handle_wait(&k_ioctl);
+		((struct cam_private_ioctl_arg *)arg)->result =
+			k_ioctl.result;
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+
+	return rc;
+}
+
+static unsigned int cam_sync_poll(struct file *f,
+ struct poll_table_struct *pll_table)
+{
+ int rc = 0;
+ struct v4l2_fh *eventq = f->private_data;
+
+ if (!eventq)
+ return -EINVAL;
+
+ poll_wait(f, &eventq->wait, pll_table);
+
+ if (v4l2_event_pending(eventq))
+ rc = POLLPRI;
+
+ return rc;
+}
+
+static int cam_sync_open(struct file *filep)
+{
+ int rc;
+ struct sync_device *sync_dev = video_drvdata(filep);
+
+ if (!sync_dev) {
+ CAM_ERR(CAM_SYNC, "Sync device NULL");
+ return -ENODEV;
+ }
+
+ mutex_lock(&sync_dev->table_lock);
+ if (sync_dev->open_cnt >= 1) {
+ mutex_unlock(&sync_dev->table_lock);
+ return -EALREADY;
+ }
+
+ rc = v4l2_fh_open(filep);
+ if (!rc) {
+ sync_dev->open_cnt++;
+ spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+ sync_dev->cam_sync_eventq = filep->private_data;
+ spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+ } else {
+ CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
+ }
+ mutex_unlock(&sync_dev->table_lock);
+
+ return rc;
+}
+
/*
 * Release handler for the cam_sync video device.
 *
 * Drops the open count; when it reaches zero, every ACTIVE sync object
 * is force-signaled with CAM_SYNC_STATE_SIGNALED_ERROR, the callback
 * workqueue is drained, and all remaining valid objects are destroyed.
 * Finally the user-space event queue pointer is cleared and the v4l2
 * file handle is released.
 *
 * NOTE(review): open_cnt is decremented unconditionally; presumably the
 * single-open policy in cam_sync_open() keeps it from going negative —
 * confirm against the open path.
 */
static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		/* Row 0 is the reserved invalid handle, so start at 1 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
			sync_dev->sync_table + i;

			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
					  "Cleanup signal fail idx:%d\n",
					  i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callbacks worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
			sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
					  "Cleanup destroy fail:idx:%d\n",
					  i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);
	/* Detach the event queue so no further V4L2 events get dispatched */
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}
+
/*
 * Subscribe a V4L2 file handle to cam_sync events, with a bounded
 * per-handle backlog of CAM_SYNC_MAX_V4L2_EVENTS queued events.
 */
int cam_sync_subscribe_event(struct v4l2_fh *fh,
		const struct v4l2_event_subscription *sub)
{
	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS, NULL);
}
+
/* Unsubscribe a V4L2 file handle from cam_sync events. */
int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
		const struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
+
/* V4L2 ioctl ops: all private ioctls funnel through vidioc_default */
static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};
+
/* File operations for the /dev/videoX node exposed by this driver */
static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open  = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl   = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+static int cam_sync_media_controller_init(struct sync_device *sync_dev,
+ struct platform_device *pdev)
+{
+ int rc;
+
+ sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
+ GFP_KERNEL);
+ if (!sync_dev->v4l2_dev.mdev)
+ return -ENOMEM;
+
+ media_device_init(sync_dev->v4l2_dev.mdev);
+ strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
+ sizeof(sync_dev->v4l2_dev.mdev->model));
+ sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
+
+ rc = media_device_register(sync_dev->v4l2_dev.mdev);
+ if (rc < 0)
+ goto register_fail;
+
+ rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
+ if (rc < 0)
+ goto entity_fail;
+
+ return 0;
+
+entity_fail:
+ media_device_unregister(sync_dev->v4l2_dev.mdev);
+register_fail:
+ media_device_cleanup(sync_dev->v4l2_dev.mdev);
+ return rc;
+}
+
/* Tear down the media entity and media device created at init time. */
static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}
+
/*
 * Name/type the media entity after the video node has been registered
 * (the node name only exists post video_register_device()).
 */
static void cam_sync_init_entity(struct sync_device *sync_dev)
{
	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
	sync_dev->vdev->entity.name =
		video_device_node_name(sync_dev->vdev);
}
#else
/* No-op stubs when CONFIG_MEDIA_CONTROLLER is disabled */
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	return 0;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
#endif
+
+static int cam_sync_create_debugfs(void)
+{
+ sync_dev->dentry = debugfs_create_dir("camera_sync", NULL);
+
+ if (!sync_dev->dentry) {
+ CAM_ERR(CAM_SYNC, "Failed to create sync dir");
+ return -ENOMEM;
+ }
+
+ if (!debugfs_create_bool("trigger_cb_without_switch",
+ 0644, sync_dev->dentry,
+ &trigger_cb_without_switch)) {
+ CAM_ERR(CAM_SYNC,
+ "failed to create trigger_cb_without_switch entry");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
/*
 * Platform probe: allocates the global sync_dev, sets up the per-row
 * spinlocks, registers the V4L2/media video node, reserves sync id 0 as
 * the invalid handle, and creates the high-priority callback workqueue.
 *
 * NOTE(review): if alloc_workqueue() fails, the error path jumps to
 * v4l2_fail even though video_register_device() already succeeded, so
 * the node is released without video_unregister_device() — confirm this
 * unwind order is intended.
 */
static int cam_sync_probe(struct platform_device *pdev)
{
	int rc;
	int idx;

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
				sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release  = video_device_release;
	sync_dev->vdev->fops     = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor     = -1;
	sync_dev->vdev->vfl_type  = VFL_TYPE_GRABBER;
	rc = video_register_device(sync_dev->vdev,
		VFL_TYPE_GRABBER, -1);
	if (rc < 0)
		goto v4l2_fail;

	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
	memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as invalid handle, so we will keep the 0th bit set
	 * always
	 */
	set_bit(0, sync_dev->bitmap);

	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);

	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		goto v4l2_fail;
	}

	trigger_cb_without_switch = false;
	/* debugfs is best-effort; its return value is deliberately ignored */
	cam_sync_create_debugfs();

	return rc;

v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	video_device_release(sync_dev->vdev);
vdev_fail:
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}
+
+static int cam_sync_remove(struct platform_device *pdev)
+{
+ v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
+ cam_sync_media_controller_cleanup(sync_dev);
+ video_device_release(sync_dev->vdev);
+ debugfs_remove_recursive(sync_dev->dentry);
+ sync_dev->dentry = NULL;
+ kfree(sync_dev);
+ sync_dev = NULL;
+
+ return 0;
+}
+
/*
 * cam_sync has no device-tree node, so a static platform device is
 * registered manually from module init to trigger the probe.
 */
static struct platform_device cam_sync_device = {
	.name = "cam_sync",
	.id = -1,
};

static struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		/* disallow manual unbind via sysfs */
		.suppress_bind_attrs = true,
	},
};
+
+static int __init cam_sync_init(void)
+{
+ int rc;
+
+ rc = platform_device_register(&cam_sync_device);
+ if (rc)
+ return -ENODEV;
+
+ return platform_driver_register(&cam_sync_driver);
+}
+
+static void __exit cam_sync_exit(void)
+{
+ int idx;
+
+ for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
+ spin_lock_init(&sync_dev->row_spinlocks[idx]);
+ platform_driver_unregister(&cam_sync_driver);
+ platform_device_unregister(&cam_sync_device);
+ kfree(sync_dev);
+}
+
+module_init(cam_sync_init);
+module_exit(cam_sync_exit);
+MODULE_DESCRIPTION("Camera sync driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_api.h b/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_api.h
new file mode 100644
index 000000000000..c735d51fe462
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_api.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_SYNC_API_H__
+#define __CAM_SYNC_API_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/videodev2.h>
+#include <uapi/media/cam_sync.h>
+
+#define SYNC_DEBUG_NAME_LEN 63
+typedef void (*sync_callback)(int32_t sync_obj, int status, void *data);
+
+/* Kernel APIs */
+
+/**
+ * @brief: Creates a sync object
+ *
 * The newly created sync object's handle is assigned to *sync_obj.
+ *
+ * @param sync_obj : Pointer to int referencing the sync object.
+ * @param name : Optional parameter associating a name with the sync object for
+ * debug purposes. Only first SYNC_DEBUG_NAME_LEN bytes are accepted,
+ * rest will be ignored.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if sync_obj is an invalid pointer.
+ * -ENOMEM will be returned if the kernel can't allocate space for
+ * sync object.
+ */
+int cam_sync_create(int32_t *sync_obj, const char *name);
+
+/**
+ * @brief: Registers a callback with a sync object
+ *
+ * @param cb_func: Pointer to callback to be registered
+ * @param userdata: Opaque pointer which will be passed back with callback.
+ * @param sync_obj: int referencing the sync object.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if userdata is invalid.
+ * -ENOMEM will be returned if cb_func is invalid.
+ *
+ */
+int cam_sync_register_callback(sync_callback cb_func,
+ void *userdata, int32_t sync_obj);
+
+/**
+ * @brief: De-registers a callback with a sync object
+ *
+ * @param cb_func: Pointer to callback to be de-registered
+ * @param userdata: Opaque pointer which will be passed back with callback.
+ * @param sync_obj: int referencing the sync object.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if userdata is invalid.
+ * -ENOMEM will be returned if cb_func is invalid.
+ */
+int cam_sync_deregister_callback(sync_callback cb_func,
+ void *userdata, int32_t sync_obj);
+
+/**
+ * @brief: Signals a sync object with the status argument.
+ *
+ * This function will signal the sync object referenced by the sync_obj
+ * parameter and when doing so, will trigger callbacks in both user space and
 * kernel. Callbacks will be triggered asynchronously and their order of execution
+ * is not guaranteed. The status parameter will indicate whether the entity
+ * performing the signaling wants to convey an error case or a success case.
+ *
+ * @param sync_obj: int referencing the sync object.
+ * @param status: Status of the signaling. Can be either SYNC_SIGNAL_ERROR or
+ * SYNC_SIGNAL_SUCCESS.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_signal(int32_t sync_obj, uint32_t status);
+
+/**
+ * @brief: Merges multiple sync objects
+ *
+ * This function will merge multiple sync objects into a sync group.
+ *
+ * @param sync_obj: pointer to a block of ints to be merged
+ * @param num_objs: Number of ints in the block
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj);
+
+/**
+ * @brief: get ref count of sync obj
+ *
+ * This function will increment ref count for the sync object, and the ref
+ * count will be decremented when this sync object is signaled.
+ *
+ * @param sync_obj: sync object
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_get_obj_ref(int32_t sync_obj);
+
+/**
+ * @brief: put ref count of sync obj
+ *
+ * This function will decrement ref count for the sync object.
+ *
+ * @param sync_obj: sync object
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_put_obj_ref(int32_t sync_obj);
+
+/**
+ * @brief: Destroys a sync object
+ *
+ * @param sync_obj: int referencing the sync object to be destroyed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_destroy(int32_t sync_obj);
+
+/**
+ * @brief: Waits for a sync object synchronously
+ *
+ * Does a wait on the sync object identified by sync_obj for a maximum
+ * of timeout_ms milliseconds. Must not be called from interrupt context as
+ * this API can sleep. Should be called from process context only.
+ *
+ * @param sync_obj: int referencing the sync object to be waited upon
 * @param timeout_ms : Timeout in milliseconds.
+ *
+ * @return 0 upon success, -EINVAL if sync object is in bad state or arguments
+ * are invalid, -ETIMEDOUT if wait times out.
+ */
+int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms);
+
+
+#endif /* __CAM_SYNC_API_H__ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_private.h b/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_private.h
new file mode 100644
index 000000000000..eb2fb34fc33c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_private.h
@@ -0,0 +1,203 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_SYNC_PRIVATE_H__
+#define __CAM_SYNC_PRIVATE_H__
+
+#include <linux/bitmap.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#ifdef CONFIG_CAM_SYNC_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define CAM_SYNC_OBJ_NAME_LEN 64
+#define CAM_SYNC_MAX_OBJS 1024
+#define CAM_SYNC_MAX_V4L2_EVENTS 50
+#define CAM_SYNC_DEBUG_FILENAME "cam_debug"
+#define CAM_SYNC_DEBUG_BASEDIR "cam"
+#define CAM_SYNC_DEBUG_BUF_SIZE 32
+#define CAM_SYNC_PAYLOAD_WORDS 2
+#define CAM_SYNC_NAME "cam_sync"
+#define CAM_SYNC_WORKQUEUE_NAME "HIPRIO_SYNC_WORK_QUEUE"
+
+#define CAM_SYNC_TYPE_INDV 0
+#define CAM_SYNC_TYPE_GROUP 1
+
+/**
+ * enum sync_type - Enum to indicate the type of sync object,
+ * i.e. individual or group.
+ *
+ * @SYNC_TYPE_INDV : Object is an individual sync object
+ * @SYNC_TYPE_GROUP : Object is a group sync object
+ */
+enum sync_type {
+ SYNC_TYPE_INDV,
+ SYNC_TYPE_GROUP
+};
+
+/**
+ * enum sync_list_clean_type - Enum to indicate the type of list clean action
 * to be performed, i.e. a specific sync ID or all sync IDs in the list.
+ *
+ * @SYNC_CLEAN_ONE : Specific object to be cleaned in the list
+ * @SYNC_CLEAN_ALL : Clean all objects in the list
+ */
+enum sync_list_clean_type {
+ SYNC_LIST_CLEAN_ONE,
+ SYNC_LIST_CLEAN_ALL
+};
+
+/**
+ * struct sync_parent_info - Single node of information about a parent
+ * of a sync object, usually part of the parents linked list
+ *
+ * @sync_id : Sync object id of parent
+ * @list : List member used to append this node to a linked list
+ */
+struct sync_parent_info {
+ int32_t sync_id;
+ struct list_head list;
+};
+
+/**
 * struct sync_child_info - Single node of information about a child
+ * of a sync object, usually part of the children linked list
+ *
+ * @sync_id : Sync object id of child
+ * @list : List member used to append this node to a linked list
+ */
+struct sync_child_info {
+ int32_t sync_id;
+ struct list_head list;
+};
+
+
+/**
+ * struct sync_callback_info - Single node of information about a kernel
+ * callback registered on a sync object
+ *
+ * @callback_func : Callback function, registered by client driver
+ * @cb_data : Callback data, registered by client driver
 * @status : Status with which callback will be invoked in client
+ * @sync_obj : Sync id of the object for which callback is registered
+ * @cb_dispatch_work : Work representing the call dispatch
+ * @list : List member used to append this node to a linked list
+ */
+struct sync_callback_info {
+ sync_callback callback_func;
+ void *cb_data;
+ int status;
+ int32_t sync_obj;
+ struct work_struct cb_dispatch_work;
+ struct list_head list;
+};
+
+/**
+ * struct sync_user_payload - Single node of information about a user space
+ * payload registered from user space
+ *
+ * @payload_data : Payload data, opaque to kernel
+ * @list : List member used to append this node to a linked list
+ */
+struct sync_user_payload {
+ uint64_t payload_data[CAM_SYNC_PAYLOAD_WORDS];
+ struct list_head list;
+};
+
+/**
+ * struct sync_table_row - Single row of information about a sync object, used
+ * for internal book keeping in the sync driver
+ *
+ * @name : Optional string representation of the sync object
+ * @type : Type of the sync object (individual or group)
+ * @sync_id : Integer id representing this sync object
+ * @parents_list : Linked list of parents of this sync object
+ * @children_list : Linked list of children of this sync object
+ * @state : State (INVALID, ACTIVE, SIGNALED_SUCCESS or
+ * SIGNALED_ERROR)
 * @remaining : Count of remaining children that have not been signaled
+ * @signaled : Completion variable on which block calls will wait
+ * @callback_list : Linked list of kernel callbacks registered
 * @user_payload_list : Linked list of user space payloads registered
+ * @ref_cnt : ref count of the number of usage of the fence.
+ */
+struct sync_table_row {
+ char name[CAM_SYNC_OBJ_NAME_LEN];
+ enum sync_type type;
+ int32_t sync_id;
+ /* List of parents, which are merged objects */
+ struct list_head parents_list;
+ /* List of children, which constitute the merged object */
+ struct list_head children_list;
+ uint32_t state;
+ uint32_t remaining;
+ struct completion signaled;
+ struct list_head callback_list;
+ struct list_head user_payload_list;
+ atomic_t ref_cnt;
+};
+
+/**
+ * struct cam_signalable_info - Information for a single sync object that is
+ * ready to be signaled
+ *
+ * @sync_obj : Sync object id of signalable object
+ * @status : Status with which to signal
+ * @list : List member used to append this node to a linked list
+ */
+struct cam_signalable_info {
+ int32_t sync_obj;
+ uint32_t status;
+ struct list_head list;
+};
+
+/**
+ * struct sync_device - Internal struct to book keep sync driver details
+ *
+ * @vdev : Video device
+ * @v4l2_dev : V4L2 device
+ * @sync_table : Table of all sync objects
+ * @row_spinlocks : Spinlock array, one for each row in the table
+ * @table_lock : Mutex used to lock the table
+ * @open_cnt : Count of file open calls made on the sync driver
+ * @dentry : Debugfs entry
+ * @work_queue : Work queue used for dispatching kernel callbacks
+ * @cam_sync_eventq : Event queue used to dispatch user payloads to user space
+ * @bitmap : Bitmap representation of all sync objects
+ */
+struct sync_device {
+ struct video_device *vdev;
+ struct v4l2_device v4l2_dev;
+ struct sync_table_row sync_table[CAM_SYNC_MAX_OBJS];
+ spinlock_t row_spinlocks[CAM_SYNC_MAX_OBJS];
+ struct mutex table_lock;
+ int open_cnt;
+ struct dentry *dentry;
+ struct workqueue_struct *work_queue;
+ struct v4l2_fh *cam_sync_eventq;
+ spinlock_t cam_sync_eventq_lock;
+ DECLARE_BITMAP(bitmap, CAM_SYNC_MAX_OBJS);
+};
+
+
+#endif /* __CAM_SYNC_PRIVATE_H__ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_util.c
new file mode 100644
index 000000000000..5666218af76e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_util.c
@@ -0,0 +1,457 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sync_util.h"
+
+int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
+ long *idx)
+{
+ int rc = 0;
+
+ mutex_lock(&sync_dev->table_lock);
+
+ *idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+
+ if (*idx < CAM_SYNC_MAX_OBJS)
+ set_bit(*idx, sync_dev->bitmap);
+ else
+ rc = -1;
+
+ mutex_unlock(&sync_dev->table_lock);
+
+ return rc;
+}
+
+int cam_sync_init_row(struct sync_table_row *table,
+ uint32_t idx, const char *name, uint32_t type)
+{
+ struct sync_table_row *row = table + idx;
+
+ if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
+ return -EINVAL;
+
+ memset(row, 0, sizeof(*row));
+
+ if (name)
+ strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
+ INIT_LIST_HEAD(&row->parents_list);
+ INIT_LIST_HEAD(&row->children_list);
+ row->type = type;
+ row->sync_id = idx;
+ row->state = CAM_SYNC_STATE_ACTIVE;
+ row->remaining = 0;
+ atomic_set(&row->ref_cnt, 0);
+ init_completion(&row->signaled);
+ INIT_LIST_HEAD(&row->callback_list);
+ INIT_LIST_HEAD(&row->user_payload_list);
+ CAM_DBG(CAM_SYNC,
+ "row name:%s sync_id:%i [idx:%u] row_state:%u ",
+ row->name, row->sync_id, idx, row->state);
+
+ return 0;
+}
+
/*
 * Initialize row @idx as a merged (group) fence over @sync_objs.
 *
 * Links each valid ACTIVE child to the group (child node on the group's
 * children_list, parent node on the child's parents_list) and counts it
 * in row->remaining. Children already in a terminal state are skipped;
 * an errored child immediately marks the group SIGNALED_ERROR. If no
 * child is pending the group is completed right away. Per-child row
 * spinlocks are taken one at a time, never nested.
 *
 * Returns 0 on success; -EINVAL for an invalid child, -ENOMEM on
 * allocation failure (in both cases all partially-created links are
 * undone and the row is marked INVALID).
 */
int cam_sync_init_group_object(struct sync_table_row *table,
	uint32_t idx,
	uint32_t *sync_objs,
	uint32_t num_objs)
{
	int i, rc = 0;
	struct sync_child_info *child_info;
	struct sync_parent_info *parent_info;
	struct sync_table_row *row = table + idx;
	struct sync_table_row *child_row = NULL;

	cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP);

	/*
	 * While traversing for children, parent's row list is updated with
	 * child info and each child's row is updated with parent info.
	 * If any child state is ERROR or SUCCESS, it will not be added to list.
	 */
	for (i = 0; i < num_objs; i++) {
		child_row = table + sync_objs[i];
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);

		/* validate child: groups of groups are not supported */
		if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
			(child_row->state == CAM_SYNC_STATE_INVALID)) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			CAM_ERR(CAM_SYNC,
				"Invalid child fence:%i state:%u type:%u",
				child_row->sync_id, child_row->state,
				child_row->type);
			rc = -EINVAL;
			goto clean_children_info;
		}

		/* check for child's state */
		if (child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
			row->state = CAM_SYNC_STATE_SIGNALED_ERROR;
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}
		if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			continue;
		}

		row->remaining++;

		/* Add child info (GFP_ATOMIC: allocated under the row lock) */
		child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
		if (!child_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		child_info->sync_id = sync_objs[i];
		list_add_tail(&child_info->list, &row->children_list);

		/* Add parent info */
		parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
		if (!parent_info) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
			rc = -ENOMEM;
			goto clean_children_info;
		}
		parent_info->sync_id = idx;
		list_add_tail(&parent_info->list, &child_row->parents_list);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	/* Nothing left pending: complete the group immediately */
	if (!row->remaining) {
		if (row->state != CAM_SYNC_STATE_SIGNALED_ERROR)
			row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
		complete_all(&row->signaled);
	}

	return 0;

clean_children_info:
	/* Undo parent links on all children processed so far (i-1 .. 0) */
	row->state = CAM_SYNC_STATE_INVALID;
	for (i = i-1; i >= 0; i--) {
		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
		child_row = table + sync_objs[i];
		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
	}

	cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);
	return rc;
}
+
/*
 * Destroy sync object @idx: unlink it from all parents and children,
 * free its registered callbacks and user payloads, and return the row
 * to the free pool.
 *
 * Locking: the row's own spinlock is taken to mark it INVALID and to
 * move its child/parent link nodes onto temporary lists; it is then
 * dropped before taking each related row's lock (one at a time) to
 * sever the back-links — this avoids holding two row locks at once.
 *
 * Returns 0 on success, -EINVAL for bad arguments or an already-invalid
 * object.
 */
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
{
	struct sync_table_row *row = table + idx;
	struct sync_child_info *child_info, *temp_child;
	struct sync_callback_info *sync_cb, *temp_cb;
	struct sync_parent_info *parent_info, *temp_parent;
	struct sync_user_payload *upayload_info, *temp_upayload;
	struct sync_table_row *child_row = NULL, *parent_row = NULL;
	struct list_head temp_child_list, temp_parent_list;

	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	CAM_DBG(CAM_SYNC,
		"row name:%s sync_id:%i [idx:%u] row_state:%u",
		row->name, row->sync_id, idx, row->state);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj: idx = %d",
			idx);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_ACTIVE)
		CAM_DBG(CAM_SYNC,
			"Destroying an active sync object name:%s id:%i",
			row->name, row->sync_id);

	/* Marking INVALID first stops any concurrent signal path early */
	row->state = CAM_SYNC_STATE_INVALID;

	/* Object's child and parent objects will be added into this list */
	INIT_LIST_HEAD(&temp_child_list);
	INIT_LIST_HEAD(&temp_parent_list);

	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
		list) {
		if (child_info->sync_id <= 0)
			continue;

		list_del_init(&child_info->list);
		list_add_tail(&child_info->list, &temp_child_list);
	}

	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
		list) {
		if (parent_info->sync_id <= 0)
			continue;

		list_del_init(&parent_info->list);
		list_add_tail(&parent_info->list, &temp_parent_list);
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	/* Cleanup the child to parent link from child list */
	while (!list_empty(&temp_child_list)) {
		child_info = list_first_entry(&temp_child_list,
			struct sync_child_info, list);
		child_row = sync_dev->sync_table + child_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);

		/* Child already destroyed: just drop our bookkeeping node */
		if (child_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&child_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				child_info->sync_id]);
			kfree(child_info);
			continue;
		}

		if (child_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active child sync obj = %d",
				child_info->sync_id);

		cam_sync_util_cleanup_parents_list(child_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&child_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
		kfree(child_info);
	}

	/* Cleanup the parent to child link */
	while (!list_empty(&temp_parent_list)) {
		parent_info = list_first_entry(&temp_parent_list,
			struct sync_parent_info, list);
		parent_row = sync_dev->sync_table + parent_info->sync_id;

		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);

		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
			list_del_init(&parent_info->list);
			spin_unlock_bh(&sync_dev->row_spinlocks[
				parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
			CAM_DBG(CAM_SYNC,
				"Warning: destroying active parent sync obj = %d",
				parent_info->sync_id);

		cam_sync_util_cleanup_children_list(parent_row,
			SYNC_LIST_CLEAN_ONE, idx);

		list_del_init(&parent_info->list);
		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		kfree(parent_info);
	}

	/* Finally free callbacks/payloads and recycle the row */
	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	list_for_each_entry_safe(upayload_info, temp_upayload,
			&row->user_payload_list, list) {
		list_del_init(&upayload_info->list);
		kfree(upayload_info);
	}

	list_for_each_entry_safe(sync_cb, temp_cb,
			&row->callback_list, list) {
		list_del_init(&sync_cb->list);
		kfree(sync_cb);
	}

	memset(row, 0, sizeof(*row));
	clear_bit(idx, sync_dev->bitmap);
	INIT_LIST_HEAD(&row->callback_list);
	INIT_LIST_HEAD(&row->parents_list);
	INIT_LIST_HEAD(&row->children_list);
	INIT_LIST_HEAD(&row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	CAM_DBG(CAM_SYNC, "Destroying sync obj:%d successful", idx);
	return 0;
}
+
/*
 * Workqueue handler: invokes one registered kernel callback and frees
 * its bookkeeping node (queued by the signal path, which removed the
 * node from the row's callback_list before queuing).
 */
void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
{
	struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
		struct sync_callback_info,
		cb_dispatch_work);

	cb_info->callback_func(cb_info->sync_obj,
		cb_info->status,
		cb_info->cb_data);

	kfree(cb_info);
}
+
/*
 * Deliver the signaled @status for @sync_obj to all consumers: queue
 * every registered kernel callback on the workqueue, send one V4L2
 * event per registered user payload, and complete the row so blocking
 * waiters wake up.
 *
 * NOTE(review): cam_sync_eventq is checked under its spinlock but the
 * lock is dropped before cam_sync_util_send_v4l2_event() runs —
 * presumably close() ordering makes this race benign; confirm.
 */
void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
	uint32_t status)
{
	struct sync_callback_info  *sync_cb;
	struct sync_user_payload   *payload_info;
	struct sync_callback_info  *temp_sync_cb;
	struct sync_table_row      *signalable_row;
	struct sync_user_payload   *temp_payload_info;

	signalable_row = sync_dev->sync_table + sync_obj;
	if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
		CAM_DBG(CAM_SYNC,
			"Accessing invalid sync object:%i", sync_obj);
		return;
	}

	/* Dispatch kernel callbacks if any were registered earlier */
	list_for_each_entry_safe(sync_cb,
		temp_sync_cb, &signalable_row->callback_list, list) {
		sync_cb->status = status;
		list_del_init(&sync_cb->list);
		queue_work(sync_dev->work_queue,
			&sync_cb->cb_dispatch_work);
	}

	/* Dispatch user payloads if any were registered earlier */
	list_for_each_entry_safe(payload_info, temp_payload_info,
		&signalable_row->user_payload_list, list) {
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		if (!sync_dev->cam_sync_eventq) {
			/* Device closed: no one left to receive events */
			spin_unlock_bh(
				&sync_dev->cam_sync_eventq_lock);
			break;
		}
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
		cam_sync_util_send_v4l2_event(
			CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			status,
			payload_info->payload_data,
			CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

		list_del_init(&payload_info->list);
		/*
		 * We can free the list node here because
		 * sending V4L event will make a deep copy
		 * anyway
		 */
		kfree(payload_info);
	}

	/*
	 * This needs to be done because we want to unblock anyone
	 * who might be blocked and waiting on this sync object
	 */
	complete_all(&signalable_row->signaled);
}
+
+void cam_sync_util_send_v4l2_event(uint32_t id,
+ uint32_t sync_obj,
+ int status,
+ void *payload,
+ int len)
+{
+ struct v4l2_event event;
+ __u64 *payload_data = NULL;
+ struct cam_sync_ev_header *ev_header = NULL;
+
+ event.id = id;
+ event.type = CAM_SYNC_V4L_EVENT;
+
+ ev_header = CAM_SYNC_GET_HEADER_PTR(event);
+ ev_header->sync_obj = sync_obj;
+ ev_header->status = status;
+
+ payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
+ memcpy(payload_data, payload, len);
+
+ v4l2_event_queue(sync_dev->vdev, &event);
+ CAM_DBG(CAM_SYNC, "send v4l2 event for sync_obj :%d",
+ sync_obj);
+}
+
+int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
+ int new_state)
+{
+ int rc = 0;
+
+ switch (parent_row->state) {
+ case CAM_SYNC_STATE_ACTIVE:
+ case CAM_SYNC_STATE_SIGNALED_SUCCESS:
+ parent_row->state = new_state;
+ break;
+
+ case CAM_SYNC_STATE_SIGNALED_ERROR:
+ break;
+
+ case CAM_SYNC_STATE_INVALID:
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
+ uint32_t list_clean_type, uint32_t sync_obj)
+{
+ struct sync_child_info *child_info = NULL;
+ struct sync_child_info *temp_child_info = NULL;
+ uint32_t curr_sync_obj;
+
+ list_for_each_entry_safe(child_info,
+ temp_child_info, &row->children_list, list) {
+ if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
+ (child_info->sync_id != sync_obj))
+ continue;
+
+ curr_sync_obj = child_info->sync_id;
+ list_del_init(&child_info->list);
+ kfree(child_info);
+
+ if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
+ (curr_sync_obj == sync_obj))
+ break;
+ }
+}
+
+void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
+ uint32_t list_clean_type, uint32_t sync_obj)
+{
+ struct sync_parent_info *parent_info = NULL;
+ struct sync_parent_info *temp_parent_info = NULL;
+ uint32_t curr_sync_obj;
+
+ list_for_each_entry_safe(parent_info,
+ temp_parent_info, &row->parents_list, list) {
+ if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
+ (parent_info->sync_id != sync_obj))
+ continue;
+
+ curr_sync_obj = parent_info->sync_id;
+ list_del_init(&parent_info->list);
+ kfree(parent_info);
+
+ if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
+ (curr_sync_obj == sync_obj))
+ break;
+ }
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_util.h
new file mode 100644
index 000000000000..cfa450c0e744
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_sync/cam_sync_util.h
@@ -0,0 +1,152 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_SYNC_UTIL_H__
+#define __CAM_SYNC_UTIL_H__
+
+
+#include <cam_sync_api.h>
+#include "cam_sync_private.h"
+#include "cam_debug_util.h"
+
+extern struct sync_device *sync_dev;
+
+/**
+ * @brief: Finds an empty row in the sync table and sets its corresponding bit
+ * in the bit array
+ *
+ * @param sync_dev : Pointer to the sync device instance
+ * @param idx : Pointer to an long containing the index found in the bit
+ * array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
+ long *idx);
+
+/**
+ * @brief: Function to initialize an empty row in the sync table. This should be
+ * called only for individual sync objects.
+ *
+ * @param table : Pointer to the sync objects table
+ * @param idx : Index of row to initialize
+ * @param name : Optional string representation of the sync object. Should be
+ * 63 characters or less
+ * @param type : type of row to be initialized
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_init_row(struct sync_table_row *table,
+ uint32_t idx, const char *name, uint32_t type);
+
+/**
+ * @brief: Function to uninitialize a row in the sync table
+ *
+ * @param table : Pointer to the sync objects table
+ * @param idx : Index of row to initialize
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx);
+
+/**
+ * @brief: Function to initialize a row in the sync table when the object is a
+ * group object, also known as a merged sync object
+ *
+ * @param table : Pointer to the sync objects table
+ * @param idx : Index of row to initialize
+ * @param sync_objs : Array of sync objects which will merged
+ * or grouped together
+ * @param num_objs : Number of sync objects in the array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_init_group_object(struct cam_sync_table_row *table,
+	uint32_t idx,
+	uint32_t *sync_objs,
+	uint32_t num_objs);
+
+/**
+ * @brief: Function to dispatch a kernel callback for a sync callback
+ *
+ * @param cb_dispatch_work : Pointer to the work_struct that needs to be
+ * dispatched
+ *
+ * @return None
+ */
+void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work);
+
+/**
+ * @brief: Function to dispatch callbacks for a signaled sync object
+ *
+ * @sync_obj : Sync object that is signaled
+ * @status : Status of the signaled object
+ *
+ * @return None
+ */
+void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
+ uint32_t status);
+
+/**
+ * @brief: Function to send V4L event to user space
+ * @param id : V4L event id to send
+ * @param sync_obj : Sync obj for which event needs to be sent
+ * @param status : Status of the event
+ * @payload : Payload that needs to be sent to user space
+ * @len : Length of the payload
+ *
+ * @return None
+ */
+void cam_sync_util_send_v4l2_event(uint32_t id,
+ uint32_t sync_obj,
+ int status,
+ void *payload,
+ int len);
+
+/**
+ * @brief: Function which updates the state of a parent (merged) sync
+ *         object based on its current state and the new state being
+ *         applied
+ *
+ * @param parent_row : Pointer to the parent row whose state is updated
+ * @param new_state  : New state to apply to the parent sync object
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
+	int new_state);
+
+/**
+ * @brief: Function to clean up the children of a sync object
+ * @row : Row whose child list to clean
+ * @list_clean_type : Clean specific object or clean all objects
+ * @sync_obj : Sync object to be clean if list clean type is
+ * SYNC_LIST_CLEAN_ONE
+ *
+ * @return None
+ */
+void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
+ uint32_t list_clean_type, uint32_t sync_obj);
+
+/**
+ * @brief: Function to clean up the parents of a sync object
+ * @row : Row whose parent list to clean
+ * @list_clean_type : Clean specific object or clean all objects
+ * @sync_obj : Sync object to be clean if list clean type is
+ * SYNC_LIST_CLEAN_ONE
+ *
+ * @return None
+ */
+void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
+ uint32_t list_clean_type, uint32_t sync_obj);
+
+#endif /* __CAM_SYNC_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/Makefile b/drivers/media/platform/msm/camera_v3/cam_utils/Makefile
new file mode 100644
index 000000000000..7e1ebd3b7c64
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/Makefile
@@ -0,0 +1,5 @@
+# Include paths for shared camera headers used by these utility objects
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera_v3/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o cam_packet_util.o cam_debug_util.o cam_trace.o cam_common_util.o
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_common_util.c b/drivers/media/platform/msm/camera_v3/cam_utils/cam_common_util.c
new file mode 100644
index 000000000000..bba12cf106df
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_common_util.c
@@ -0,0 +1,57 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+
+/*
+ * Find the index of the first entry in @strings that matches
+ * @matching_string, writing it through @index.
+ *
+ * NOTE(review): strnstr() performs a substring search, so an entry
+ * that merely contains @matching_string also matches -- presumably
+ * intentional for partial name matching; confirm against callers.
+ *
+ * Returns 0 on success, -EINVAL when nothing matches or on NULL args.
+ */
+int cam_common_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index)
+{
+	uint32_t i;
+
+	if (!strings || !matching_string || !index) {
+		CAM_ERR(CAM_UTIL, "Invalid params");
+		return -EINVAL;
+	}
+
+	/* uint32_t index avoids a signed/unsigned comparison with num_strings */
+	for (i = 0; i < num_strings; i++) {
+		if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
+			CAM_DBG(CAM_UTIL, "matched %s : %d\n",
+				matching_string, i);
+			*index = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Compact @arr in place so that its unique values occupy the front.
+ *
+ * @arr: Array of integers (modified in place)
+ * @num: Number of elements in @arr
+ *
+ * Returns the count of unique integers now at the start of @arr;
+ * 0 for a NULL array or when @num is 0.
+ */
+uint32_t cam_common_util_remove_duplicate_arr(int32_t *arr, uint32_t num)
+{
+	uint32_t i, j;
+	uint32_t wr_idx = 1;
+
+	if (!arr) {
+		CAM_ERR(CAM_UTIL, "Null input array");
+		return 0;
+	}
+
+	/* Fix: an empty array has 0 unique elements, not 1 */
+	if (!num)
+		return 0;
+
+	for (i = 1; i < num; i++) {
+		/* Keep arr[i] only if it is absent from arr[0..wr_idx) */
+		for (j = 0; j < wr_idx; j++) {
+			if (arr[i] == arr[j])
+				break;
+		}
+		if (j == wr_idx)
+			arr[wr_idx++] = arr[i];
+	}
+
+	return wr_idx;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_common_util.h b/drivers/media/platform/msm/camera_v3/cam_utils/cam_common_util.h
new file mode 100644
index 000000000000..47d441fe3aa1
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_common_util.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_COMMON_UTIL_H_
+#define _CAM_COMMON_UTIL_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define CAM_BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+
+#define PTR_TO_U64(ptr) ((uint64_t)(uintptr_t)ptr)
+#define U64_TO_PTR(ptr) ((void *)(uintptr_t)ptr)
+
+/**
+ * cam_common_util_get_string_index()
+ *
+ * @brief Match the string from list of strings to return
+ * matching index
+ *
+ * @strings: Pointer to list of strings
+ * @num_strings: Number of strings in 'strings'
+ * @matching_string: String to match
+ * @index: Pointer to index to return matching index
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_common_util_get_string_index(const char **strings,
+ uint32_t num_strings, char *matching_string, uint32_t *index);
+
+/**
+ * cam_common_util_remove_duplicate_arr()
+ *
+ * @brief Move all the unique integers to the start of
+ * the array and return the number of unique integers
+ *
+ * @array: Pointer to the first integer of array
+ * @num: Number of elements in array
+ *
+ * @return: Number of unique integers in array
+ */
+uint32_t cam_common_util_remove_duplicate_arr(int32_t *array,
+ uint32_t num);
+
+#endif /* _CAM_COMMON_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_debug_util.c b/drivers/media/platform/msm/camera_v3/cam_utils/cam_debug_util.c
new file mode 100644
index 000000000000..4f326342e3a7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_debug_util.c
@@ -0,0 +1,124 @@
+/* Copyright (c) 2017-2018, The Linux Foundataion. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include "cam_debug_util.h"
+
+static uint debug_mdl;
+module_param(debug_mdl, uint, 0644);
+
+/*
+ * Map a CAM_* module bit to its human-readable log-prefix string.
+ * Unknown ids fall back to the generic "CAM" prefix.
+ */
+const char *cam_get_module_name(unsigned int module_id)
+{
+	const char *name = NULL;
+
+	switch (module_id) {
+	case CAM_CDM:
+		name = "CAM-CDM";
+		break;
+	case CAM_CORE:
+		name = "CAM-CORE";
+		break;
+	case CAM_CRM:
+		name = "CAM-CRM";
+		break;
+	case CAM_CPAS:
+		name = "CAM-CPAS";
+		break;
+	case CAM_ISP:
+		name = "CAM-ISP";
+		break;
+	case CAM_SENSOR:
+		name = "CAM-SENSOR";
+		break;
+	case CAM_SMMU:
+		name = "CAM-SMMU";
+		break;
+	case CAM_SYNC:
+		name = "CAM-SYNC";
+		break;
+	case CAM_ICP:
+		name = "CAM-ICP";
+		break;
+	case CAM_JPEG:
+		name = "CAM-JPEG";
+		break;
+	case CAM_FD:
+		name = "CAM-FD";
+		break;
+	case CAM_LRME:
+		name = "CAM-LRME";
+		break;
+	case CAM_FLASH:
+		name = "CAM-FLASH";
+		break;
+	case CAM_ACTUATOR:
+		name = "CAM-ACTUATOR";
+		break;
+	case CAM_CCI:
+		name = "CAM-CCI";
+		break;
+	case CAM_CSIPHY:
+		name = "CAM-CSIPHY";
+		break;
+	case CAM_EEPROM:
+		name = "CAM-EEPROM";
+		break;
+	case CAM_UTIL:
+		name = "CAM-UTIL";
+		break;
+	case CAM_CTXT:
+		name = "CAM-CTXT";
+		break;
+	case CAM_HFI:
+		name = "CAM-HFI";
+		break;
+	case CAM_OIS:
+		name = "CAM-OIS";
+		break;
+	/* Fix: CAM_RES is defined in cam_debug_util.h but had no case */
+	case CAM_RES:
+		name = "CAM-RES";
+		break;
+	case CAM_IRQ_CTRL:
+		name = "CAM-IRQ-CTRL";
+		break;
+	case CAM_MEM:
+		name = "CAM-MEM";
+		break;
+	case CAM_PERF:
+		name = "CAM-PERF";
+		break;
+	case CAM_REQ:
+		name = "CAM-REQ";
+		break;
+	default:
+		name = "CAM";
+		break;
+	}
+
+	return name;
+}
+
+/*
+ * Emit one debug log line for @module_id if its bit is set in the
+ * debug_mdl module parameter; otherwise do nothing.
+ *
+ * @module_id : CAM_* module bit of the caller
+ * @func      : Calling function name (goes into the log prefix)
+ * @line      : Calling line number (goes into the log prefix)
+ * @fmt       : printf-style format string followed by its arguments
+ */
+void cam_debug_log(unsigned int module_id, const char *func, const int line,
+	const char *fmt, ...)
+{
+	char str_buffer[STR_BUFFER_MAX_LENGTH];
+	va_list args;
+
+	/* Early exit keeps the common (logging disabled) path cheap */
+	if (!(debug_mdl & module_id))
+		return;
+
+	va_start(args, fmt);
+	vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+	/*
+	 * Fix: va_end() must be reached on every path after va_start();
+	 * the original skipped it entirely when logging was disabled,
+	 * which is undefined behavior.
+	 */
+	va_end(args);
+
+	pr_info("CAM_DBG: %s: %s: %d: %s\n",
+		cam_get_module_name(module_id),
+		func, line, str_buffer);
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_debug_util.h b/drivers/media/platform/msm/camera_v3/cam_utils/cam_debug_util.h
new file mode 100644
index 000000000000..9093517de1e4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_debug_util.h
@@ -0,0 +1,141 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_DEBUG_UTIL_H_
+#define _CAM_DEBUG_UTIL_H_
+
+#define CAM_CDM (1 << 0)
+#define CAM_CORE (1 << 1)
+#define CAM_CPAS (1 << 2)
+#define CAM_ISP (1 << 3)
+#define CAM_CRM (1 << 4)
+#define CAM_SENSOR (1 << 5)
+#define CAM_SMMU (1 << 6)
+#define CAM_SYNC (1 << 7)
+#define CAM_ICP (1 << 8)
+#define CAM_JPEG (1 << 9)
+#define CAM_FD (1 << 10)
+#define CAM_LRME (1 << 11)
+#define CAM_FLASH (1 << 12)
+#define CAM_ACTUATOR (1 << 13)
+#define CAM_CCI (1 << 14)
+#define CAM_CSIPHY (1 << 15)
+#define CAM_EEPROM (1 << 16)
+#define CAM_UTIL (1 << 17)
+#define CAM_HFI (1 << 18)
+#define CAM_CTXT (1 << 19)
+#define CAM_OIS (1 << 20)
+#define CAM_RES (1 << 21)
+#define CAM_MEM (1 << 22)
+
+/* CAM_IRQ_CTRL: For events in irq controller */
+#define CAM_IRQ_CTRL (1 << 23)
+
+/* CAM_REQ: Tracks a request submitted to KMD */
+#define CAM_REQ (1 << 24)
+
+/* CAM_PERF: Used for performance (clock, BW etc) logs */
+#define CAM_PERF (1 << 25)
+
+#define STR_BUFFER_MAX_LENGTH 1024
+
+/*
+ * cam_debug_log()
+ *
+ * @brief : Get the Module name from module ID and print
+ * respective debug logs
+ *
+ * @module_id : Respective Module ID which is calling this function
+ * @func : Function which is calling to print logs
+ * @line : Line number associated with the function which is calling
+ * to print log
+ * @fmt : Formatted string which needs to be print in the log
+ *
+ */
+void cam_debug_log(unsigned int module_id, const char *func, const int line,
+ const char *fmt, ...);
+
+/*
+ * cam_get_module_name()
+ *
+ * @brief : Get the module name from module ID
+ *
+ * @module_id : Module ID which is using this function
+ */
+const char *cam_get_module_name(unsigned int module_id);
+
+/*
+ * CAM_ERR
+ * @brief : This Macro will print error logs
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_ERR(__module, fmt, args...) \
+ pr_err("CAM_ERR: %s: %s: %d " fmt "\n", \
+ cam_get_module_name(__module), __func__, __LINE__, ##args)
+/*
+ * CAM_WARN
+ * @brief : This Macro will print warning logs
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_WARN(__module, fmt, args...) \
+ pr_warn("CAM_WARN: %s: %s: %d " fmt "\n", \
+ cam_get_module_name(__module), __func__, __LINE__, ##args)
+/*
+ * CAM_INFO
+ * @brief : This Macro will print Information logs
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_INFO(__module, fmt, args...) \
+ pr_info("CAM_INFO: %s: %s: %d " fmt "\n", \
+ cam_get_module_name(__module), __func__, __LINE__, ##args)
+
+/*
+ * CAM_INFO_RATE_LIMIT
+ * @brief :     This Macro will print info logs with ratelimit
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt      : Formatted string which needs to be print in log
+ * @args     : Arguments which needs to be print in log
+ */
+#define CAM_INFO_RATE_LIMIT(__module, fmt, args...)                 \
+	pr_info_ratelimited("CAM_INFO: %s: %s: %d " fmt "\n",       \
+		cam_get_module_name(__module), __func__, __LINE__, ##args)
+
+/*
+ * CAM_DBG
+ * @brief : This Macro will print debug logs when enabled using GROUP
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_DBG(__module, fmt, args...) \
+ cam_debug_log(__module, __func__, __LINE__, fmt, ##args)
+
+/*
+ * CAM_ERR_RATE_LIMIT
+ * @brief : This Macro will print error print logs with ratelimit
+ */
+#define CAM_ERR_RATE_LIMIT(__module, fmt, args...) \
+ pr_err_ratelimited("CAM_ERR: %s: %s: %d " fmt "\n", \
+ cam_get_module_name(__module), __func__, __LINE__, ##args)
+
+#endif /* _CAM_DEBUG_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera_v3/cam_utils/cam_io_util.c
new file mode 100644
index 000000000000..0fd6d31cae2e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_io_util.c
@@ -0,0 +1,287 @@
+/* Copyright (c) 2011-2014, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+
+/*
+ * Write @data to the register at @addr (relaxed store, no barrier,
+ * no register-access logging). Returns -EINVAL for a NULL address.
+ */
+int cam_io_w(uint32_t data, void __iomem *addr)
+{
+	if (addr == NULL)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
+	writel_relaxed_no_log(data, addr);
+	return 0;
+}
+
+/*
+ * Write @data to @addr fenced by write barriers: the first wmb()
+ * orders all earlier stores before this one, the second orders this
+ * store before anything that follows. Returns -EINVAL on NULL addr.
+ */
+int cam_io_w_mb(uint32_t data, void __iomem *addr)
+{
+	if (!addr)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
+	/* Ensure previous writes are done */
+	wmb();
+	writel_relaxed_no_log(data, addr);
+	/* Ensure this write is ordered before subsequent accesses */
+	wmb();
+
+	return 0;
+}
+
+/*
+ * Read and return the register at @addr (relaxed load, no barrier).
+ * A NULL address logs an error and yields 0 -- indistinguishable from
+ * a register that actually reads 0.
+ */
+uint32_t cam_io_r(void __iomem *addr)
+{
+	uint32_t val;
+
+	if (addr == NULL) {
+		CAM_ERR(CAM_UTIL, "Invalid args");
+		return 0;
+	}
+
+	val = readl_relaxed(addr);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, val);
+	return val;
+}
+
+/*
+ * Read the register at @addr with read barriers on both sides, so the
+ * load is ordered against surrounding reads. A NULL address logs an
+ * error and yields 0.
+ */
+uint32_t cam_io_r_mb(void __iomem *addr)
+{
+	uint32_t data;
+
+	if (!addr) {
+		CAM_ERR(CAM_UTIL, "Invalid args");
+		return 0;
+	}
+
+	/* Ensure previous read is done */
+	rmb();
+	data = readl_relaxed(addr);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
+	/* Ensure this read completes before later reads */
+	rmb();
+
+	return data;
+}
+
+/*
+ * Copy @len bytes from @src_addr to the register block at @dest_addr,
+ * one 32-bit word at a time; any trailing bytes beyond a multiple of
+ * four are not copied. Relaxed stores, no barrier.
+ */
+int cam_io_memcpy(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len)
+{
+	uint32_t *dst = (uint32_t *) dest_addr;
+	uint32_t *src = (uint32_t *) src_addr;
+	int word;
+	int num_words;
+
+	if (!dest_addr || !src_addr)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "%pK %pK %d", dest_addr, src_addr, len);
+
+	num_words = len / 4;
+	for (word = 0; word < num_words; word++) {
+		CAM_DBG(CAM_UTIL, "0x%pK %08x", dst, *src);
+		writel_relaxed(*src++, dst++);
+	}
+
+	return 0;
+}
+
+/*
+ * Copy @len bytes (in 32-bit words; remainder bytes are dropped) from
+ * @src_addr to @dest_addr, with a write barrier before and after the
+ * whole block rather than around each individual word.
+ */
+int cam_io_memcpy_mb(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len)
+{
+	int i;
+	uint32_t *d = (uint32_t *) dest_addr;
+	uint32_t *s = (uint32_t *) src_addr;
+
+	if (!dest_addr || !src_addr)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "%pK %pK %d", dest_addr, src_addr, len);
+
+	/*
+	 * Do not use cam_io_w_mb to avoid double wmb() after a write
+	 * and before the next write.
+	 */
+	wmb();
+	for (i = 0; i < (len / 4); i++) {
+		CAM_DBG(CAM_UTIL, "0x%pK %08x", d, *s);
+		writel_relaxed(*s++, d++);
+	}
+	/* Ensure previous writes are done */
+	wmb();
+
+	return 0;
+}
+
+/*
+ * Poll the register at @addr until it reads exactly @wait_data or
+ * @retry re-reads are exhausted, sleeping min/max usecs between
+ * attempts (no sleep when either bound is 0).
+ *
+ * Returns 0 when the value matched, -EINVAL on timeout or NULL addr.
+ * May sleep; must not be called from atomic context.
+ */
+int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry,
+	unsigned long min_usecs, unsigned long max_usecs)
+{
+	uint32_t tmp, cnt = 0;
+	int rc = 0;
+
+	if (!addr)
+		return -EINVAL;
+
+	tmp = readl_relaxed(addr);
+	while ((tmp != wait_data) && (cnt++ < retry)) {
+		if (min_usecs > 0 && max_usecs > 0)
+			usleep_range(min_usecs, max_usecs);
+		tmp = readl_relaxed(addr);
+	}
+
+	/* cnt only exceeds retry when the loop gave up without a match */
+	if (cnt > retry) {
+		CAM_DBG(CAM_UTIL, "Poll failed by value");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Poll the register at @addr until (value & @bmask) equals @wait_data
+ * or @retry re-reads are exhausted, sleeping min/max usecs between
+ * attempts (no sleep when either bound is 0).
+ *
+ * Returns 0 on match, -EINVAL on timeout or NULL address.
+ * May sleep; must not be called from atomic context.
+ */
+int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data,
+	uint32_t bmask, uint32_t retry, unsigned long min_usecs,
+	unsigned long max_usecs)
+{
+	uint32_t tmp, cnt = 0;
+	int rc = 0;
+
+	if (!addr)
+		return -EINVAL;
+
+	tmp = readl_relaxed(addr);
+	while (((tmp & bmask) != wait_data) && (cnt++ < retry)) {
+		if (min_usecs > 0 && max_usecs > 0)
+			usleep_range(min_usecs, max_usecs);
+		tmp = readl_relaxed(addr);
+	}
+
+	/* cnt only exceeds retry when the loop gave up without a match */
+	if (cnt > retry) {
+		CAM_DBG(CAM_UTIL, "Poll failed with mask");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Write each of the @len words in @data to the single register at
+ * @addr (relaxed stores, no barrier between writes).
+ */
+int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len)
+{
+	uint32_t idx;
+
+	if (!data || !len || !addr)
+		return -EINVAL;
+
+	for (idx = 0; idx < len; idx++) {
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr =%pK",
+			idx, len, data[idx], addr);
+		writel_relaxed(data[idx], addr);
+	}
+
+	return 0;
+}
+
+/*
+ * Write each of the @len words in @data to the single register at
+ * @addr, issuing a wmb() before every store so each one is ordered
+ * after all preceding writes.
+ */
+int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr =%pK",
+			i, len, data[i], addr);
+		/* Ensure previous writes are done */
+		wmb();
+		writel_relaxed(data[i], addr);
+	}
+
+	return 0;
+}
+
+/* Column accessors for the {offset, value} pair table */
+#define __OFFSET(__i) (data[__i][0])
+#define __VAL(__i) (data[__i][1])
+/*
+ * Write a table of {register offset, value} pairs: each value is
+ * stored (relaxed, no barrier) at @addr_base + its offset. @len is
+ * the number of pairs. Returns -EINVAL on NULL/zero arguments.
+ */
+int cam_io_w_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr_base)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr_base =%pK reg=%x",
+			i, len, __VAL(i), addr_base, __OFFSET(i));
+		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
+	}
+
+	return 0;
+}
+
+/*
+ * Barrier variant of cam_io_w_offset_val_block(): a single wmb()
+ * before the loop orders all prior writes ahead of this block; the
+ * per-pair stores themselves are relaxed, so the header requires the
+ * table's offsets to be distinct.
+ */
+int cam_io_w_mb_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr_base)
+		return -EINVAL;
+
+	/* Order all earlier writes before this register block */
+	wmb();
+	for (i = 0; i < len; i++) {
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr_base =%pK reg=%x",
+			i, len, __VAL(i), addr_base, __OFFSET(i));
+		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
+	}
+
+	return 0;
+}
+
+#define BYTES_PER_REGISTER 4
+#define NUM_REGISTER_PER_LINE 4
+#define REG_OFFSET(__start, __i) (__start + (__i * BYTES_PER_REGISTER))
+/*
+ * Dump @size consecutive 32-bit registers starting at
+ * @base_addr + @start_offset, four per line, in the form
+ * "0xOFFSET: VAL VAL VAL VAL ". Output goes through CAM_ERR so the
+ * dump is visible regardless of the debug_mdl mask.
+ */
+int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size)
+{
+	/* 12-char prefix + 4 * 9-char values + NUL = 49 max, well within 128 */
+	char line_str[128];
+	char *p_str;
+	int i;
+	uint32_t data;
+
+	CAM_DBG(CAM_UTIL, "addr=%pK offset=0x%x size=%d",
+		base_addr, start_offset, size);
+
+	if (!base_addr || (size <= 0))
+		return -EINVAL;
+
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size; i++) {
+		if (i % NUM_REGISTER_PER_LINE == 0) {
+			/*
+			 * Fix: "0x%08x: " expands to 12 chars; the old size
+			 * of 12 truncated the trailing space so the first
+			 * value ran into the address prefix.
+			 */
+			snprintf(p_str, 13, "0x%08x: ",
+				REG_OFFSET(start_offset, i));
+			p_str += 12;
+		}
+		data = readl_relaxed(base_addr + REG_OFFSET(start_offset, i));
+		/*
+		 * Fix: "%08x " expands to 9 chars; the old size of 9
+		 * truncated the separating space, fusing adjacent values.
+		 */
+		snprintf(p_str, 10, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
+			CAM_ERR(CAM_UTIL, "%s", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	/* Flush a partially filled final line */
+	if (line_str[0] != '\0')
+		CAM_ERR(CAM_UTIL, "%s", line_str);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_io_util.h b/drivers/media/platform/msm/camera_v3/cam_utils/cam_io_util.h
new file mode 100644
index 000000000000..f1d219f32e4d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_io_util.h
@@ -0,0 +1,239 @@
+/* Copyright (c) 2011-2014, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IO_UTIL_H_
+#define _CAM_IO_UTIL_H_
+
+#include <linux/types.h>
+
+/**
+ * cam_io_w()
+ *
+ * @brief: Camera IO util for register write
+ *
+ * @data: Value to be written
+ * @addr: Address used to write the value
+ *
+ * @return: Success or Failure
+ */
+int cam_io_w(uint32_t data, void __iomem *addr);
+
+/**
+ * cam_io_w_mb()
+ *
+ * @brief: Camera IO util for register write with memory barrier.
+ * Memory Barrier is only before the write to ensure the
+ * order. If need to ensure this write is also flushed
+ * call wmb() independently in the caller.
+ *
+ * @data: Value to be written
+ * @addr: Address used to write the value
+ *
+ * @return: Success or Failure
+ */
+int cam_io_w_mb(uint32_t data, void __iomem *addr);
+
+/**
+ * cam_io_r()
+ *
+ * @brief: Camera IO util for register read
+ *
+ * @addr: Address of register to be read
+ *
+ * @return: Value read from the register address
+ */
+uint32_t cam_io_r(void __iomem *addr);
+
+/**
+ * cam_io_r_mb()
+ *
+ * @brief:              Camera IO util for register read with memory barrier.
+ *                      Memory Barrier is only before the read to ensure the
+ *                      order. If need to ensure this read is also flushed
+ *                      call rmb() independently in the caller.
+ *
+ * @addr:               Address of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+uint32_t cam_io_r_mb(void __iomem *addr);
+
+/**
+ * cam_io_memcpy()
+ *
+ * @brief:              Camera IO util for memory to register copy
+ *
+ * @dest_addr:          Destination register address
+ * @src_addr:           Source register address
+ * @len:                Range to be copied, in bytes (copied as 32-bit words)
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_memcpy(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len);
+
+/**
+ * cam_io_memcpy_mb()
+ *
+ * @brief:              Camera IO util for memory to register copy
+ *                      with barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If need to ensure this write is also flushed
+ *                      call wmb() independently in the caller.
+ *
+ * @dest_addr:          Destination register address
+ * @src_addr:           Source register address
+ * @len:                Range to be copied, in bytes (copied as 32-bit words)
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_memcpy_mb(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len);
+
+/**
+ * cam_io_poll_value_wmask()
+ *
+ * @brief: Poll register value with bitmask.
+ *
+ * @addr: Register address to be polled
+ * @wait_data: Wait until @bmask read from @addr matches this data
+ * @bmask: Bit mask
+ * @retry: Number of retry
+ * @min_usecs: Minimum time to wait for retry
+ * @max_usecs: Maximum time to wait for retry
+ *
+ * @return: Success or Failure
+ *
+ * This function can sleep so it should not be called from interrupt
+ * handler, spin_lock etc.
+ */
+int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data,
+ uint32_t bmask, uint32_t retry, unsigned long min_usecs,
+ unsigned long max_usecs);
+
+/**
+ * cam_io_poll_value()
+ *
+ * @brief: Poll register value
+ *
+ * @addr: Register address to be polled
+ * @wait_data: Wait until value read from @addr matches this data
+ * @retry: Number of retry
+ * @min_usecs: Minimum time to wait for retry
+ * @max_usecs: Maximum time to wait for retry
+ *
+ * @return: Success or Failure
+ *
+ * This function can sleep so it should not be called from interrupt
+ * handler, spin_lock etc.
+ */
+int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry,
+ unsigned long min_usecs, unsigned long max_usecs);
+
+/**
+ * cam_io_w_same_offset_block()
+ *
+ * @brief: Write a block of data to same address
+ *
+ * @data: Block data to be written
+ * @addr: Register offset to be written.
+ * @len: Number of the data to be written
+ *
+ * @return: Success or Failure
+ */
+int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr,
+ uint32_t len);
+
+/**
+ * cam_io_w_mb_same_offset_block()
+ *
+ * @brief: Write a block of data to same address with barrier.
+ * Memory Barrier is only before the write to ensure the
+ * order. If need to ensure this write is also flushed
+ * call wmb() independently in the caller.
+ *
+ * @data: Block data to be written
+ * @addr: Register offset to be written.
+ * @len: Number of the data to be written
+ *
+ * @return: Success or Failure
+ */
+int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr,
+ uint32_t len);
+
+/**
+ * cam_io_w_offset_val_block()
+ *
+ * @brief:              This API is to write a block of registers
+ *                      represented by a 2 dimensional array table with
+ *                      register offset and value pair
+ *
+ *                      offset0, value0,
+ *                      offset1, value1,
+ *                      offset2, value2,
+ *                      and so on...
+ *
+ * @data:               Pointer to 2-dimensional offset-value array
+ * @addr_base:          Base address to which offset will be added to
+ *                      get the register address
+ * @len:                Number of offset-value pairs to be written
+ *
+ * @return:             Success or Failure
+ *
+ * NOTE(review): the definition in cam_io_util.c uses plain 'int' as
+ * the return type; same width here, but the two should be harmonized.
+ */
+int32_t cam_io_w_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len);
+
+/**
+ * cam_io_w_mb_offset_val_block()
+ *
+ * @brief:              This API is to write a block of registers
+ *                      represented by a 2 dimensional array table with
+ *                      register offset and value pair with memory barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If need to ensure this write is also flushed
+ *                      call wmb() independently in the caller.
+ *                      The OFFSETS NEED to be different because of the way
+ *                      barrier is used here.
+ *
+ *                      offset0, value0,
+ *                      offset1, value1,
+ *                      offset2, value2,
+ *                      and so on...
+ *
+ * @data:               Pointer to 2-dimensional offset-value array
+ * @addr_base:          Base address to which offset will be added to
+ *                      get the register address
+ * @len:                Number of offset-value pairs to be written
+ *
+ * @return:             Success or Failure
+ *
+ */
+int32_t cam_io_w_mb_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len);
+
+/**
+ * cam_io_dump()
+ *
+ * @brief: Camera IO util for dumping a range of register
+ *
+ * @base_addr: Start register address for the dumping
+ * @start_offset: Start register offset for the dump
+ * @size: Size specifying the range for dumping
+ *
+ * @return: Success or Failure
+ */
+int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size);
+
+#endif /* _CAM_IO_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera_v3/cam_utils/cam_packet_util.c
new file mode 100644
index 000000000000..acfae3622130
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_packet_util.c
@@ -0,0 +1,264 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "cam_mem_mgr.h"
+#include "cam_packet_util.h"
+#include "cam_debug_util.h"
+
+/* Map a mem-mgr handle to its kernel virtual address; rejects NULL/zero-length mappings. */
+int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
+	size_t *len)
+{
+	int rc = 0;
+	uintptr_t kmd_buf_addr = 0;
+
+	rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "Unable to get the virtual address %d", rc);
+	} else {
+		if (kmd_buf_addr && *len) {
+			*buf_addr = (uint32_t *)kmd_buf_addr;
+		} else {
+			/* Mapping call succeeded but address/length unusable */
+			CAM_ERR(CAM_UTIL, "Invalid addr and length :%zd", *len);
+			rc = -ENOMEM;
+		}
+	}
+	return rc;
+}
+
+/*
+ * Sanity-check a command buffer descriptor: the used length must fit in
+ * the allocated size and the memory handle must be positive.
+ * NOTE(review): cmd_desc->offset is logged but not range-checked here;
+ * callers appear to rely on later size checks — confirm.
+ */
+int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc)
+{
+	if ((cmd_desc->length > cmd_desc->size) ||
+		(cmd_desc->mem_handle <= 0)) {
+		CAM_ERR(CAM_UTIL, "invalid cmd arg %d %d %d %d",
+			cmd_desc->offset, cmd_desc->length,
+			cmd_desc->mem_handle, cmd_desc->size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate top-level packet invariants: the KMD buffer index must refer
+ * to an existing command buffer, the header size must be non-zero, and
+ * both the cmd-buf and io-config tables must start inside the packet.
+ */
+int cam_packet_util_validate_packet(struct cam_packet *packet)
+{
+	if (!packet)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "num cmd buf:%d num of io config:%d kmd buf index:%d",
+		packet->num_cmd_buf, packet->num_io_configs,
+		packet->kmd_cmd_buf_index);
+
+	if ((packet->kmd_cmd_buf_index >= packet->num_cmd_buf) ||
+		(!packet->header.size) ||
+		(packet->cmd_buf_offset > packet->header.size) ||
+		(packet->io_configs_offset > packet->header.size)) {
+		CAM_ERR(CAM_UTIL, "invalid packet:%d %d %d %d %d",
+			packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Locate the KMD scratch buffer inside a packet: select the command
+ * descriptor at kmd_cmd_buf_index, validate it, map its memory handle,
+ * and fill @kmd_buf with the CPU address/offset/free-space information.
+ * The usable KMD area is the tail of the buffer past the already-used
+ * 'length' bytes (size - length).
+ */
+int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
+	struct cam_kmd_buf_info *kmd_buf)
+{
+	int rc = 0;
+	size_t len = 0;
+	struct cam_cmd_buf_desc *cmd_desc;
+	uint32_t *cpu_addr;
+
+	if (!packet || !kmd_buf) {
+		CAM_ERR(CAM_UTIL, "Invalid arg %pK %pK", packet, kmd_buf);
+		return -EINVAL;
+	}
+
+	/* Take first command descriptor and add offset to it for kmd*/
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)
+		&packet->payload + packet->cmd_buf_offset);
+	cmd_desc += packet->kmd_cmd_buf_index;
+
+	rc = cam_packet_util_validate_cmd_desc(cmd_desc);
+	if (rc)
+		return rc;
+
+	rc = cam_packet_util_get_cmd_mem_addr(cmd_desc->mem_handle, &cpu_addr,
+		&len);
+	if (rc)
+		return rc;
+
+	/* The mapped region must cover the descriptor's declared size */
+	if (len < cmd_desc->size) {
+		CAM_ERR(CAM_UTIL, "invalid memory len:%zd and cmd desc size:%d",
+			len, cmd_desc->size);
+		return -EINVAL;
+	}
+
+	/* Offsets are byte values on a uint32_t pointer, hence the /4 */
+	cpu_addr += (cmd_desc->offset / 4) + (packet->kmd_cmd_buf_offset / 4);
+	CAM_DBG(CAM_UTIL, "total size %d, cmd size: %d, KMD buffer size: %d",
+		cmd_desc->size, cmd_desc->length,
+		cmd_desc->size - cmd_desc->length);
+	CAM_DBG(CAM_UTIL, "hdl 0x%x, cmd offset %d, kmd offset %d, addr 0x%pK",
+		cmd_desc->mem_handle, cmd_desc->offset,
+		packet->kmd_cmd_buf_offset, cpu_addr);
+
+	kmd_buf->cpu_addr   = cpu_addr;
+	kmd_buf->handle     = cmd_desc->mem_handle;
+	kmd_buf->offset     = cmd_desc->offset + packet->kmd_cmd_buf_offset;
+	kmd_buf->size       = cmd_desc->size - cmd_desc->length;
+	kmd_buf->used_bytes = 0;
+
+	return rc;
+}
+
+/*
+ * Walk the packet's patch table and, for each entry, write the IOVA of
+ * the source buffer (+ src_offset) into the destination command buffer
+ * at dst_offset. Secure buffers are mapped through sec_mmu_hdl, all
+ * others through iommu_hdl.
+ * NOTE(review): 'temp' narrows dma_addr_t to uint32_t — assumes 32-bit
+ * IOVAs from the SMMU; confirm for this target.
+ */
+int cam_packet_util_process_patches(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl)
+{
+	struct cam_patch_desc *patch_desc = NULL;
+	dma_addr_t iova_addr;
+	uintptr_t  cpu_addr;
+	uint32_t   temp;
+	uint32_t  *dst_cpu_addr;
+	uint32_t  *src_buf_iova_addr;
+	size_t     dst_buf_len;
+	size_t     src_buf_size;
+	int        i;
+	int        rc = 0;
+	int32_t    hdl;
+
+	/* process patch descriptor */
+	patch_desc = (struct cam_patch_desc *)
+			((uint32_t *) &packet->payload +
+			packet->patch_offset/4);
+	CAM_DBG(CAM_UTIL, "packet = %pK patch_desc = %pK size = %lu",
+		(void *)packet, (void *)patch_desc,
+		sizeof(struct cam_patch_desc));
+
+	for (i = 0; i < packet->num_patches; i++) {
+
+		hdl = cam_mem_is_secure_buf(patch_desc[i].src_buf_hdl) ?
+			sec_mmu_hdl : iommu_hdl;
+		rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
+			hdl, &iova_addr, &src_buf_size);
+		if (rc < 0) {
+			CAM_ERR(CAM_UTIL, "unable to get src buf address");
+			return rc;
+		}
+		/* src_buf_iova_addr is kept only for the debug print below */
+		src_buf_iova_addr = (uint32_t *)iova_addr;
+		temp = iova_addr;
+
+		rc = cam_mem_get_cpu_buf(patch_desc[i].dst_buf_hdl,
+			&cpu_addr, &dst_buf_len);
+		if (rc < 0) {
+			CAM_ERR(CAM_UTIL, "unable to get dst buf address");
+			return rc;
+		}
+		dst_cpu_addr = (uint32_t *)cpu_addr;
+
+		CAM_DBG(CAM_UTIL, "i = %d patch info = %x %x %x %x", i,
+			patch_desc[i].dst_buf_hdl, patch_desc[i].dst_offset,
+			patch_desc[i].src_buf_hdl, patch_desc[i].src_offset);
+
+		/* dst_offset is in bytes, so advance via a uint8_t view */
+		dst_cpu_addr = (uint32_t *)((uint8_t *)dst_cpu_addr +
+			patch_desc[i].dst_offset);
+		temp += patch_desc[i].src_offset;
+
+		*dst_cpu_addr = temp;
+
+		/*
+		 * NOTE(review): the debug print reads 64 bits from a
+		 * location that was only patched with 32 — verify the
		 * extra word is meaningful/readable.
+		 */
+		CAM_DBG(CAM_UTIL,
+			"patch is done for dst %pK with src %pK value %llx",
+			dst_cpu_addr, src_buf_iova_addr,
+			*((uint64_t *)dst_cpu_addr));
+	}
+
+	return rc;
+}
+
+/*
+ * Iterate the blobs packed in a generic command buffer. Each blob starts
+ * with a uint32_t header encoding type and size; the payload is padded
+ * up to the next uint32_t boundary. The registered callback is invoked
+ * once per blob with the unpadded payload size.
+ */
+int cam_packet_util_process_generic_cmd_buffer(
+	struct cam_cmd_buf_desc *cmd_buf,
+	cam_packet_generic_blob_handler blob_handler_cb, void *user_data)
+{
+	int       rc;
+	uintptr_t cpu_addr;
+	size_t    buf_size;
+	uint32_t *blob_ptr;
+	uint32_t  blob_type, blob_size, blob_block_size, len_read;
+
+	if (!cmd_buf || !blob_handler_cb) {
+		CAM_ERR(CAM_UTIL, "Invalid args %pK %pK",
+			cmd_buf, blob_handler_cb);
+		return -EINVAL;
+	}
+
+	if (!cmd_buf->length || !cmd_buf->size) {
+		CAM_ERR(CAM_UTIL, "Invalid cmd buf size %d %d",
+			cmd_buf->length, cmd_buf->size);
+		return -EINVAL;
+	}
+
+	rc = cam_mem_get_cpu_buf(cmd_buf->mem_handle, &cpu_addr, &buf_size);
+	if (rc || !cpu_addr || (buf_size == 0)) {
+		CAM_ERR(CAM_UTIL, "Failed in Get cpu addr, rc=%d, cpu_addr=%pK",
+			rc, (void *)cpu_addr);
+		return rc;
+	}
+
+	blob_ptr = (uint32_t *)(((uint8_t *)cpu_addr) +
+		cmd_buf->offset);
+
+	CAM_DBG(CAM_UTIL,
+		"GenericCmdBuffer cpuaddr=%pK, blobptr=%pK, len=%d",
+		(void *)cpu_addr, (void *)blob_ptr, cmd_buf->length);
+
+	len_read = 0;
+	while (len_read < cmd_buf->length) {
+		/* Decode type and payload size from the blob header word */
+		blob_type =
+			((*blob_ptr) & CAM_GENERIC_BLOB_CMDBUFFER_TYPE_MASK) >>
+			CAM_GENERIC_BLOB_CMDBUFFER_TYPE_SHIFT;
+		blob_size =
+			((*blob_ptr) & CAM_GENERIC_BLOB_CMDBUFFER_SIZE_MASK) >>
+			CAM_GENERIC_BLOB_CMDBUFFER_SIZE_SHIFT;
+
+		/* Header word plus payload rounded up to 4-byte alignment */
+		blob_block_size = sizeof(uint32_t) +
+			(((blob_size + sizeof(uint32_t) - 1) /
+			sizeof(uint32_t)) * sizeof(uint32_t));
+
+		CAM_DBG(CAM_UTIL,
+			"Blob type=%d size=%d block_size=%d len_read=%d total=%d",
+			blob_type, blob_size, blob_block_size, len_read,
+			cmd_buf->length);
+
+		/* Reject blobs that would run past the declared length */
+		if (len_read + blob_block_size > cmd_buf->length) {
+			CAM_ERR(CAM_UTIL, "Invalid Blob %d %d %d %d",
+				blob_type, blob_size, len_read,
+				cmd_buf->length);
+			return -EINVAL;
+		}
+
+		len_read += blob_block_size;
+
+		rc = blob_handler_cb(user_data, blob_type, blob_size,
+			(uint8_t *)(blob_ptr + 1));
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "Error in handling blob type %d %d",
+				blob_type, blob_size);
+			return rc;
+		}
+
+		blob_ptr += (blob_block_size / sizeof(uint32_t));
+	}
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_packet_util.h b/drivers/media/platform/msm/camera_v3/cam_utils/cam_packet_util.h
new file mode 100644
index 000000000000..b2315232262d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_packet_util.h
@@ -0,0 +1,130 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_PACKET_UTIL_H_
+#define _CAM_PACKET_UTIL_H_
+
+#include <uapi/media/cam_defs.h>
+
+/**
+ * @brief KMD scratch buffer information
+ *
+ * @handle: Memory handle
+ * @cpu_addr: Cpu address
+ * @offset: Offset from the start of the buffer
+ * @size: Size of the buffer
+ * @used_bytes: Used memory in bytes
+ *
+ */
+struct cam_kmd_buf_info {
+ int handle;
+ uint32_t *cpu_addr;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t used_bytes;
+};
+
+/* Generic Cmd Buffer blob callback function type */
+typedef int (*cam_packet_generic_blob_handler)(void *user_data,
+ uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data);
+
+/**
+ * cam_packet_util_get_cmd_mem_addr()
+ *
+ * @brief Get command buffer address
+ *
+ * @handle: Command buffer memory handle
+ * @buf_addr: Command buffer cpu mapped address
+ * @len: Command buffer length
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
+ size_t *len);
+
+/**
+ * cam_packet_util_validate_packet()
+ *
+ * @brief Validate the packet
+ *
+ * @packet: Packet to be validated
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_packet_util_validate_packet(struct cam_packet *packet);
+
+/**
+ * cam_packet_util_validate_cmd_desc()
+ *
+ * @brief Validate the packet
+ *
+ * @cmd_desc: Command descriptor to be validated
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc);
+
+/**
+ * cam_packet_util_get_kmd_buffer()
+ *
+ * @brief Get the kmd buffer from the packet command descriptor
+ *
+ * @packet: Packet data
+ * @kmd_buf: Extracted the KMD buffer information
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
+ struct cam_kmd_buf_info *kmd_buf_info);
+
+/**
+ * cam_packet_util_process_patches()
+ *
+ * @brief: Replace the handle in Packet to Address using the
+ * information from patches.
+ *
+ * @packet: Input packet containing Command Buffers and Patches
+ * @iommu_hdl: IOMMU handle of the HW Device that received the packet
+ * @sec_iommu_hdl: Secure IOMMU handle of the HW Device that
+ * received the packet
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_packet_util_process_patches(struct cam_packet *packet,
+ int32_t iommu_hdl, int32_t sec_mmu_hdl);
+
+/**
+ * cam_packet_util_process_generic_cmd_buffer()
+ *
+ * @brief: Process Generic Blob command buffer. This utility
+ * function process the command buffer and calls the
+ * blob_handle_cb callback for each blob that exists
+ * in the command buffer.
+ *
+ * @cmd_buf: Generic Blob Cmd Buffer handle
+ * @blob_handler_cb: Callback pointer to call for each blob exists in the
+ * command buffer
+ * @user_data: User data to be passed while callback
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_packet_util_process_generic_cmd_buffer(
+ struct cam_cmd_buf_desc *cmd_buf,
+ cam_packet_generic_blob_handler blob_handler_cb, void *user_data);
+
+#endif /* _CAM_PACKET_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera_v3/cam_utils/cam_soc_util.c
new file mode 100644
index 000000000000..4972a2a8cd08
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_soc_util.c
@@ -0,0 +1,1700 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <soc/qcom/socinfo.h>
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+#include <linux/nvmem-consumer.h>
+
+/* Thin wrapper returning the platform SoC id from socinfo. */
+uint32_t cam_soc_util_get_soc_id(void)
+{
+	return socinfo_get_id();
+}
+#if defined(CONFIG_NVMEM) && defined(CONFIG_QCOM_QFPROM)
+/*
+ * Read the "minor_rev" nvmem cell and return the SoC minor HW revision
+ * (bits 29:28 of the fuse word). Returns 0 when no DT entry exists.
+ * NOTE(review): the return type is uint32_t but -EINVAL is returned on
+ * some error paths (wraps to a large positive value); also the
+ * PTR_ERR(cell) == -ENOENT check below is unreachable, since any error
+ * pointer already returned inside the IS_ERR_OR_NULL branch — confirm
+ * and clean up upstream.
+ */
+uint32_t cam_soc_util_get_hw_revision_node(struct cam_hw_soc_info *soc_info)
+{
+	struct nvmem_cell *cell;
+	ssize_t len;
+	uint32_t *buf, hw_rev;
+	struct platform_device *pdev;
+
+	pdev = soc_info->pdev;
+	/* read the soc hw revision and select revision node */
+	cell = nvmem_cell_get(&pdev->dev, "minor_rev");
+	if (IS_ERR_OR_NULL(cell)) {
+		if (PTR_ERR(cell) == -EPROBE_DEFER) {
+			CAM_ERR(CAM_UTIL, "Err to get nvmem cell: ret=%ld",
+				PTR_ERR(cell));
+			return -EINVAL;
+		}
+		CAM_ERR(CAM_UTIL, "No DTS entry");
+		return 0;
+	}
+
+	if (PTR_ERR(cell) == -ENOENT) {
+		CAM_DBG(CAM_UTIL, "nvme cell not found");
+		return 0;
+	}
+
+	buf = nvmem_cell_read(cell, &len);
+	nvmem_cell_put(cell);
+
+	if (IS_ERR_OR_NULL(buf)) {
+		CAM_ERR(CAM_UTIL, "Unable to read nvmem cell: ret=%ld",
+			PTR_ERR(buf));
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_UTIL, "hw_rev = %u", *buf);
+	hw_rev = (*buf >> 28) & 0x3;
+	/* nvmem_cell_read returns a kmalloc'd buffer — caller must free */
+	kfree(buf);
+
+	return hw_rev;
+}
+#else
+/* Stub when NVMEM/QFPROM support is compiled out: report revision 0. */
+uint32_t cam_soc_util_get_hw_revision_node(struct cam_hw_soc_info *soc_info)
+{
+	return 0;
+}
+#endif
+
+static char supported_clk_info[256];
+static char debugfs_dir_name[64];
+
+/**
+ * cam_soc_util_get_string_from_level()
+ *
+ * @brief: Returns the string for a given clk level
+ *
+ * @level: Clock level
+ *
+ * @return: String corresponding to the clk level
+ */
+static const char *cam_soc_util_get_string_from_level(
+	enum cam_vote_level level)
+{
+	switch (level) {
+	/* SUSPEND and any unknown level map to an empty string */
+	case CAM_SUSPEND_VOTE:
+		return "";
+	case CAM_MINSVS_VOTE:
+		return "MINSVS[1]";
+	case CAM_LOWSVS_VOTE:
+		return "LOWSVS[2]";
+	case CAM_SVS_VOTE:
+		return "SVS[3]";
+	case CAM_SVSL1_VOTE:
+		return "SVSL1[4]";
+	case CAM_NOMINAL_VOTE:
+		return "NOM[5]";
+	case CAM_NOMINALL1_VOTE:
+		return "NOML1[6]";
+	case CAM_TURBO_VOTE:
+		return "TURBO[7]";
+	default:
+		return "";
+	}
+}
+
+/**
+ * cam_soc_util_get_supported_clk_levels()
+ *
+ * @brief: Returns the string of all the supported clk levels for
+ * the given device
+ *
+ * @soc_info: Device soc information
+ *
+ * @return: String containing all supported clk levels
+ */
+/*
+ * Build a space-separated list of the device's valid clock levels into
+ * the file-scope supported_clk_info buffer and return it.
+ * NOTE(review): uses a single static buffer, so concurrent debugfs
+ * readers of different devices would race — confirm acceptable here.
+ */
+static const char *cam_soc_util_get_supported_clk_levels(
+	struct cam_hw_soc_info *soc_info)
+{
+	int i = 0;
+
+	memset(supported_clk_info, 0, sizeof(supported_clk_info));
+	strlcat(supported_clk_info, "Supported levels: ",
+		sizeof(supported_clk_info));
+
+	for (i = 0; i < CAM_MAX_VOTE; i++) {
+		if (soc_info->clk_level_valid[i] == true) {
+			strlcat(supported_clk_info,
+				cam_soc_util_get_string_from_level(i),
+				sizeof(supported_clk_info));
+			strlcat(supported_clk_info, " ",
+				sizeof(supported_clk_info));
+		}
+	}
+
+	strlcat(supported_clk_info, "\n", sizeof(supported_clk_info));
+	return supported_clk_info;
+}
+
+/* debugfs open: stash the soc_info pointer for the read handler. */
+static int cam_soc_util_clk_lvl_options_open(struct inode *inode,
+	struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/*
+ * debugfs read: return the supported-levels string to userspace.
+ * NOTE(review): the parameters are named 'size_t' and 'loff_t',
+ * shadowing the typedefs — compiles, but rename on next touch.
+ */
+static ssize_t cam_soc_util_clk_lvl_options_read(struct file *file,
+	char __user *clk_info, size_t size_t, loff_t *loff_t)
+{
+	struct cam_hw_soc_info *soc_info =
+		(struct cam_hw_soc_info *)file->private_data;
+	const char *display_string =
+		cam_soc_util_get_supported_clk_levels(soc_info);
+
+	return simple_read_from_buffer(clk_info, size_t, loff_t, display_string,
+		strlen(display_string));
+}
+
+static const struct file_operations cam_soc_util_clk_lvl_options = {
+ .open = cam_soc_util_clk_lvl_options_open,
+ .read = cam_soc_util_clk_lvl_options_read,
+};
+
+/*
+ * debugfs setter for the clk level override. Out-of-range values are
+ * silently ignored; an in-range but invalid level clears the override.
+ */
+static int cam_soc_util_set_clk_lvl(void *data, u64 val)
+{
+	struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data;
+
+	if (val <= CAM_SUSPEND_VOTE || val >= CAM_MAX_VOTE)
+		return 0;
+
+	if (soc_info->clk_level_valid[val] == true)
+		soc_info->clk_level_override = val;
+	else
+		soc_info->clk_level_override = 0;
+
+	return 0;
+}
+
+/* debugfs getter: report the current clk level override (0 = none). */
+static int cam_soc_util_get_clk_lvl(void *data, u64 *val)
+{
+	struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data;
+
+	*val = soc_info->clk_level_override;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_soc_util_clk_lvl_control,
+ cam_soc_util_get_clk_lvl, cam_soc_util_set_clk_lvl, "%08llu");
+
+/**
+ * cam_soc_util_create_clk_lvl_debugfs()
+ *
+ * @brief: Creates debugfs files to view/control device clk rates
+ *
+ * @soc_info: Device soc information
+ *
+ * @return: Success or failure
+ */
+static int cam_soc_util_create_clk_lvl_debugfs(
+	struct cam_hw_soc_info *soc_info)
+{
+	struct dentry *dentry = NULL;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_UTIL, "soc info is NULL");
+		return -EINVAL;
+	}
+
+	if (soc_info->dentry)
+		return 0;
+
+	/* Directory name: "clk_dir_<dev_name>" in the debugfs root */
+	memset(debugfs_dir_name, 0, sizeof(debugfs_dir_name));
+	strlcat(debugfs_dir_name, "clk_dir_", sizeof(debugfs_dir_name));
+	strlcat(debugfs_dir_name, soc_info->dev_name, sizeof(debugfs_dir_name));
+
+	/*
+	 * NOTE(review): this assignment is immediately overwritten, and the
+	 * created dentry is never stored back into soc_info->dentry — so the
+	 * early-return guard above never triggers and
+	 * cam_soc_util_remove_clk_lvl_debugfs() has nothing to remove.
+	 * Confirm intent; looks like "soc_info->dentry = dentry" is missing.
+	 */
+	dentry = soc_info->dentry;
+	dentry = debugfs_create_dir(debugfs_dir_name, NULL);
+	if (!dentry) {
+		CAM_ERR(CAM_UTIL, "failed to create debug directory");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_file("clk_lvl_options", 0444,
+		dentry, soc_info, &cam_soc_util_clk_lvl_options)) {
+		CAM_ERR(CAM_UTIL, "failed to create clk_lvl_options");
+		goto err;
+	}
+
+	if (!debugfs_create_file("clk_lvl_control", 0644,
+		dentry, soc_info, &cam_soc_util_clk_lvl_control)) {
+		CAM_ERR(CAM_UTIL, "failed to create clk_lvl_control");
+		goto err;
+	}
+
+	CAM_DBG(CAM_UTIL, "clk lvl debugfs for %s successfully created",
+		soc_info->dev_name);
+
+	return 0;
+
+err:
+	/* Tear down the partially-created directory tree */
+	debugfs_remove_recursive(dentry);
+	dentry = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * cam_soc_util_remove_clk_lvl_debugfs()
+ *
+ * @brief: Removes the debugfs files used to view/control
+ * device clk rates
+ *
+ * @soc_info: Device soc information
+ *
+ */
+/* Remove the device's clk-level debugfs tree and clear the cached dentry. */
+static void cam_soc_util_remove_clk_lvl_debugfs(
+	struct cam_hw_soc_info *soc_info)
+{
+	debugfs_remove_recursive(soc_info->dentry);
+	soc_info->dentry = NULL;
+}
+
+/*
+ * Translate a DT "clock-cntl-level" string into a cam_vote_level enum.
+ * Returns -EINVAL for a NULL output pointer or an unknown string.
+ */
+int cam_soc_util_get_level_from_string(const char *string,
+	enum cam_vote_level *level)
+{
+	if (!level)
+		return -EINVAL;
+
+	if (!strcmp(string, "suspend")) {
+		*level = CAM_SUSPEND_VOTE;
+	} else if (!strcmp(string, "minsvs")) {
+		*level = CAM_MINSVS_VOTE;
+	} else if (!strcmp(string, "lowsvs")) {
+		*level = CAM_LOWSVS_VOTE;
+	} else if (!strcmp(string, "svs")) {
+		*level = CAM_SVS_VOTE;
+	} else if (!strcmp(string, "svs_l1")) {
+		*level = CAM_SVSL1_VOTE;
+	} else if (!strcmp(string, "nominal")) {
+		*level = CAM_NOMINAL_VOTE;
+	} else if (!strcmp(string, "nominal_l1")) {
+		*level = CAM_NOMINALL1_VOTE;
+	} else if (!strcmp(string, "turbo")) {
+		*level = CAM_TURBO_VOTE;
+	} else {
+		CAM_ERR(CAM_UTIL, "Invalid string %s", string);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cam_soc_util_get_clk_level_to_apply()
+ *
+ * @brief: Get the clock level to apply. If the requested level
+ * is not valid, bump the level to next available valid
+ * level. If no higher level found, return failure.
+ *
+ * @soc_info: Device soc struct to be populated
+ * @req_level: Requested level
+ * @apply_level Level to apply
+ *
+ * @return: success or failure
+ */
+static int cam_soc_util_get_clk_level_to_apply(
+	struct cam_hw_soc_info *soc_info, enum cam_vote_level req_level,
+	enum cam_vote_level *apply_level)
+{
+	if (req_level >= CAM_MAX_VOTE) {
+		CAM_ERR(CAM_UTIL, "Invalid clock level parameter %d",
+			req_level);
+		return -EINVAL;
+	}
+
+	if (soc_info->clk_level_valid[req_level] == true) {
+		*apply_level = req_level;
+	} else {
+		int i;
+
+		/* Requested level unsupported: bump to next valid level up */
+		for (i = (req_level + 1); i < CAM_MAX_VOTE; i++)
+			if (soc_info->clk_level_valid[i] == true) {
+				*apply_level = i;
+				break;
+			}
+
+		/* No higher valid level exists either — fail the request */
+		if (i == CAM_MAX_VOTE) {
+			CAM_ERR(CAM_UTIL,
+				"No valid clock level found to apply, req=%d",
+				req_level);
+			return -EINVAL;
+		}
+	}
+
+	CAM_DBG(CAM_UTIL, "Req level %d, Applying %d",
+		req_level, *apply_level);
+
+	return 0;
+}
+
+/* Enable the device's IRQ line; -ENODEV when no line was parsed from DT. */
+int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
+{
+	if (!soc_info) {
+		CAM_ERR(CAM_UTIL, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (!soc_info->irq_line) {
+		CAM_ERR(CAM_UTIL, "No IRQ line available");
+		return -ENODEV;
+	}
+
+	enable_irq(soc_info->irq_line->start);
+
+	return 0;
+}
+
+/* Disable the device's IRQ line; mirror of cam_soc_util_irq_enable(). */
+int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info)
+{
+	if (!soc_info) {
+		CAM_ERR(CAM_UTIL, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (!soc_info->irq_line) {
+		CAM_ERR(CAM_UTIL, "No IRQ line available");
+		return -ENODEV;
+	}
+
+	disable_irq(soc_info->irq_line->start);
+
+	return 0;
+}
+
+/*
+ * Round a requested rate through the clock framework for clk_index.
+ * NOTE(review): on invalid parameters this returns the caller's rate
+ * unchanged rather than an error code — callers cannot distinguish
+ * failure; confirm this is the intended contract.
+ */
+long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_index, unsigned long clk_rate)
+{
+	if (!soc_info || (clk_index >= soc_info->num_clk) || (clk_rate == 0)) {
+		CAM_ERR(CAM_UTIL, "Invalid input params %pK, %d %lu",
+			soc_info, clk_index, clk_rate);
+		return clk_rate;
+	}
+
+	return clk_round_rate(soc_info->clk[clk_index], clk_rate);
+}
+
+/* Forward vendor clock flags to the clk framework for clk_index. */
+int cam_soc_util_set_clk_flags(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_index, unsigned long flags)
+{
+	if (!soc_info || (clk_index >= soc_info->num_clk)) {
+		CAM_ERR(CAM_UTIL, "Invalid input params %pK, %d",
+			soc_info, clk_index);
+		return -EINVAL;
+	}
+
+	return clk_set_flags(soc_info->clk[clk_index], flags);
+}
+
+/**
+ * cam_soc_util_set_clk_rate()
+ *
+ * @brief: Sets the given rate for the clk requested for
+ *
+ * @clk: Clock structure information for which rate is to be set
+ * @clk_name: Name of the clock for which rate is being set
+ * @clk_rate Clock rate to be set
+ *
+ * @return: Success or failure
+ */
+static int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
+	int32_t clk_rate)
+{
+	int rc = 0;
+	long clk_rate_round;
+
+	if (!clk || !clk_name)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "set %s, rate %d", clk_name, clk_rate);
+	if (clk_rate > 0) {
+		/* Explicit rate requested: round then set */
+		clk_rate_round = clk_round_rate(clk, clk_rate);
+		CAM_DBG(CAM_UTIL, "new_rate %ld", clk_rate_round);
+		if (clk_rate_round < 0) {
+			CAM_ERR(CAM_UTIL, "round failed for clock %s rc = %ld",
+				clk_name, clk_rate_round);
+			return clk_rate_round;
+		}
+		rc = clk_set_rate(clk, clk_rate_round);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
+			return rc;
+		}
+	} else if (clk_rate == INIT_RATE) {
+		/*
+		 * INIT_RATE sentinel: keep the clock's current rate, or
+		 * fall back to the lowest supported rate if it reads 0.
+		 */
+		clk_rate_round = clk_get_rate(clk);
+		CAM_DBG(CAM_UTIL, "init new_rate %ld", clk_rate_round);
+		if (clk_rate_round == 0) {
+			clk_rate_round = clk_round_rate(clk, 0);
+			if (clk_rate_round <= 0) {
+				CAM_ERR(CAM_UTIL, "round rate failed on %s",
+					clk_name);
+				return clk_rate_round;
+			}
+		}
+		rc = clk_set_rate(clk, clk_rate_round);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
+			return rc;
+		}
+	}
+	/* Any other non-positive value (e.g. NO_SET_RATE) is a no-op */
+
+	return rc;
+}
+
+/*
+ * Set the rate on the device's designated source clock.
+ * NOTE(review): when a debugfs clk_level_override is active it is
+ * assigned directly into clk_rate, i.e. a level index is passed where a
+ * Hz rate is expected — verify whether a level-to-rate lookup is
+ * missing here.
+ */
+int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info,
+	int32_t clk_rate)
+{
+	int32_t src_clk_idx;
+	struct clk *clk = NULL;
+
+	if (!soc_info || (soc_info->src_clk_idx < 0))
+		return -EINVAL;
+
+	if (soc_info->clk_level_override && clk_rate)
+		clk_rate = soc_info->clk_level_override;
+
+	src_clk_idx = soc_info->src_clk_idx;
+	clk = soc_info->clk[src_clk_idx];
+
+	return cam_soc_util_set_clk_rate(clk,
+		soc_info->clk_name[src_clk_idx], clk_rate);
+
+}
+
+/*
+ * Release a clock reference and NULL the caller's pointer.
+ * NOTE(review): 'clk' itself is dereferenced before any NULL check —
+ * assumes callers never pass a NULL pointer-to-pointer; confirm.
+ */
+int cam_soc_util_clk_put(struct clk **clk)
+{
+	if (!(*clk)) {
+		CAM_ERR(CAM_UTIL, "Invalid params clk");
+		return -EINVAL;
+	}
+
+	clk_put(*clk);
+	*clk = NULL;
+
+	return 0;
+}
+
+/*
+ * Resolve entry @index of the DT "clocks-option" phandle list into a
+ * struct clk. Returns an ERR_PTR on failure (including index < 0).
+ */
+static struct clk *cam_soc_util_option_clk_get(struct device_node *np,
+	int index)
+{
+	struct of_phandle_args clkspec;
+	struct clk *clk;
+	int rc;
+
+	if (index < 0)
+		return ERR_PTR(-EINVAL);
+
+	rc = of_parse_phandle_with_args(np, "clocks-option", "#clock-cells",
+		index, &clkspec);
+	if (rc)
+		return ERR_PTR(rc);
+
+	clk = of_clk_get_from_provider(&clkspec);
+	/* Drop the node reference taken by of_parse_phandle_with_args */
+	of_node_put(clkspec.np);
+
+	return clk;
+}
+
+/*
+ * Look up an optional clock by name via the DT "clock-names-option" /
+ * "clocks-option" / "clock-rates-option" triple. On success fills
+ * *clk, *clk_index and *clk_rate (0 in DT is mapped to INIT_RATE).
+ * On failure *clk_index is set to -1 / *clk_rate to 0 and the clock
+ * reference is released.
+ */
+int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info,
+	const char *clk_name, struct clk **clk, int32_t *clk_index,
+	int32_t *clk_rate)
+{
+	int index = 0;
+	int rc = 0;
+	struct device_node *of_node = NULL;
+
+	if (!soc_info || !clk_name || !clk) {
+		CAM_ERR(CAM_UTIL,
+			"Invalid params soc_info %pK clk_name %s clk %pK",
+			soc_info, clk_name, clk);
+		return -EINVAL;
+	}
+
+	of_node = soc_info->dev->of_node;
+
+	/* A negative index (name not found) is rejected inside clk_get */
+	index = of_property_match_string(of_node, "clock-names-option",
+		clk_name);
+
+	*clk = cam_soc_util_option_clk_get(of_node, index);
+	if (IS_ERR(*clk)) {
+		CAM_ERR(CAM_UTIL, "No clk named %s found. Dev %s", clk_name,
+			soc_info->dev_name);
+		*clk_index = -1;
+		return -EFAULT;
+	}
+	*clk_index = index;
+
+	rc = of_property_read_u32_index(of_node, "clock-rates-option",
+		index, clk_rate);
+	if (rc) {
+		CAM_ERR(CAM_UTIL,
+			"Error reading clock-rates clk_name %s index %d",
+			clk_name, index);
+		cam_soc_util_clk_put(clk);
+		*clk_rate = 0;
+		return rc;
+	}
+
+	/*
+	 * Option clocks are assumed to be available to single Device here.
+	 * Hence use INIT_RATE instead of NO_SET_RATE.
+	 */
+	*clk_rate = (*clk_rate == 0) ? (int32_t)INIT_RATE : *clk_rate;
+
+	CAM_DBG(CAM_UTIL, "clk_name %s index %d clk_rate %d",
+		clk_name, *clk_index, *clk_rate);
+
+	return 0;
+}
+
+/* Set the requested rate (honoring the INIT_RATE/NO_SET_RATE sentinels),
+ * then prepare+enable the clock.
+ */
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+	int32_t clk_rate)
+{
+	int rc = 0;
+
+	if (!clk || !clk_name)
+		return -EINVAL;
+
+	rc = cam_soc_util_set_clk_rate(clk, clk_name, clk_rate);
+	if (rc)
+		return rc;
+
+	rc = clk_prepare_enable(clk);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "enable failed for %s: rc(%d)", clk_name, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/* Disable and unprepare one clock; counterpart of cam_soc_util_clk_enable. */
+int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name)
+{
+	if (!clk || !clk_name)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "disable %s", clk_name);
+	clk_disable_unprepare(clk);
+
+	return 0;
+}
+
+/**
+ * cam_soc_util_clk_enable_default()
+ *
+ * @brief: This function enables the default clocks present
+ * in soc_info
+ *
+ * @soc_info: Device soc struct to be populated
+ * @clk_level: Clk level to apply while enabling
+ *
+ * @return: success or failure
+ */
+int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level)
+{
+	int i, rc = 0;
+	enum cam_vote_level apply_level;
+
+	if ((soc_info->num_clk == 0) ||
+		(soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
+		CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
+			soc_info->num_clk);
+		return -EINVAL;
+	}
+
+	/* Bump to the nearest valid level at or above the request */
+	rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
+		&apply_level);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		rc = cam_soc_util_clk_enable(soc_info->clk[i],
+			soc_info->clk_name[i],
+			soc_info->clk_rate[apply_level][i]);
+		if (rc)
+			goto clk_disable;
+	}
+
+	return rc;
+
+clk_disable:
+	/* Unwind: disable every clock enabled before the failure */
+	for (i--; i >= 0; i--) {
+		cam_soc_util_clk_disable(soc_info->clk[i],
+			soc_info->clk_name[i]);
+	}
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_clk_disable_default()
+ *
+ * @brief: This function disables the default clocks present
+ * in soc_info
+ *
+ * @soc_info: device soc struct to be populated
+ *
+ * @return: success or failure
+ */
+/* Disable all default clocks in reverse order of enabling. */
+void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+
+	if (soc_info->num_clk == 0)
+		return;
+
+	for (i = soc_info->num_clk - 1; i >= 0; i--)
+		cam_soc_util_clk_disable(soc_info->clk[i],
+			soc_info->clk_name[i]);
+}
+
+/**
+ * cam_soc_util_get_dt_clk_info()
+ *
+ * @brief: Parse the DT and populate the Clock properties
+ *
+ * @soc_info: device soc struct to be populated
+ * @src_clk_str name of src clock that has rate control
+ *
+ * @return: success or failure
+ */
+static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	int count;
+	int num_clk_rates, num_clk_levels;
+	int i, j, rc;
+	int32_t num_clk_level_strings;
+	const char *src_clk_str = NULL;
+	const char *clk_control_debugfs = NULL;
+	const char *clk_cntl_lvl_string = NULL;
+	enum cam_vote_level level;
+
+	if (!soc_info || !soc_info->dev)
+		return -EINVAL;
+
+	of_node = soc_info->dev->of_node;
+
+	if (!of_property_read_bool(of_node, "use-shared-clk")) {
+		CAM_DBG(CAM_UTIL, "No shared clk parameter defined");
+		soc_info->use_shared_clk = false;
+	} else {
+		soc_info->use_shared_clk = true;
+	}
+
+	count = of_property_count_strings(of_node, "clock-names");
+
+	CAM_DBG(CAM_UTIL, "count = %d", count);
+	if (count > CAM_SOC_MAX_CLK) {
+		CAM_ERR(CAM_UTIL, "invalid count of clocks, count=%d", count);
+		rc = -EINVAL;
+		return rc;
+	}
+	/* No clocks is a valid configuration, not an error */
+	if (count <= 0) {
+		CAM_DBG(CAM_UTIL, "No clock-names found");
+		count = 0;
+		soc_info->num_clk = count;
+		return 0;
+	}
+	soc_info->num_clk = count;
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+			i, &(soc_info->clk_name[i]));
+		CAM_DBG(CAM_UTIL, "clock-names[%d] = %s",
+			i, soc_info->clk_name[i]);
+		if (rc) {
+			CAM_ERR(CAM_UTIL,
+				"i= %d count= %d reading clock-names failed",
+				i, count);
+			return rc;
+		}
+	}
+
+	/* "clock-rates" is a flat array: one row of num_clk rates per level */
+	num_clk_rates = of_property_count_u32_elems(of_node, "clock-rates");
+	if (num_clk_rates <= 0) {
+		CAM_ERR(CAM_UTIL, "reading clock-rates count failed");
+		return -EINVAL;
+	}
+
+	if ((num_clk_rates % soc_info->num_clk) != 0) {
+		CAM_ERR(CAM_UTIL,
+			"mismatch clk/rates, No of clocks=%d, No of rates=%d",
+			soc_info->num_clk, num_clk_rates);
+		return -EINVAL;
+	}
+
+	num_clk_levels = (num_clk_rates / soc_info->num_clk);
+
+	num_clk_level_strings = of_property_count_strings(of_node,
+		"clock-cntl-level");
+	if (num_clk_level_strings != num_clk_levels) {
+		CAM_ERR(CAM_UTIL,
+			"Mismatch No of levels=%d, No of level string=%d",
+			num_clk_levels, num_clk_level_strings);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_clk_levels; i++) {
+		rc = of_property_read_string_index(of_node,
+			"clock-cntl-level", i, &clk_cntl_lvl_string);
+		if (rc) {
+			CAM_ERR(CAM_UTIL,
+				"Error reading clock-cntl-level, rc=%d", rc);
+			return rc;
+		}
+
+		rc = cam_soc_util_get_level_from_string(clk_cntl_lvl_string,
+			&level);
+		if (rc)
+			return rc;
+
+		CAM_DBG(CAM_UTIL,
+			"[%d] : %s %d", i, clk_cntl_lvl_string, level);
+		soc_info->clk_level_valid[level] = true;
+		for (j = 0; j < soc_info->num_clk; j++) {
+			rc = of_property_read_u32_index(of_node, "clock-rates",
+				((i * soc_info->num_clk) + j),
+				&soc_info->clk_rate[level][j]);
+			if (rc) {
+				CAM_ERR(CAM_UTIL,
+					"Error reading clock-rates, rc=%d",
+					rc);
+				return rc;
+			}
+
+			/* A DT rate of 0 means "don't touch the rate" */
+			soc_info->clk_rate[level][j] =
+				(soc_info->clk_rate[level][j] == 0) ?
+				(int32_t)NO_SET_RATE :
+				soc_info->clk_rate[level][j];
+
+			CAM_DBG(CAM_UTIL, "soc_info->clk_rate[%d][%d] = %d",
+				level, j,
+				soc_info->clk_rate[level][j]);
+		}
+	}
+
+	/* Optional: identify which named clock carries rate control */
+	soc_info->src_clk_idx = -1;
+	rc = of_property_read_string_index(of_node, "src-clock-name", 0,
+		&src_clk_str);
+	if (rc || !src_clk_str) {
+		CAM_DBG(CAM_UTIL, "No src_clk_str found");
+		rc = 0;
+		goto end;
+	}
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		if (strcmp(soc_info->clk_name[i], src_clk_str) == 0) {
+			soc_info->src_clk_idx = i;
+			CAM_DBG(CAM_UTIL, "src clock = %s, index = %d",
+				src_clk_str, i);
+			break;
+		}
+	}
+
+	/* Optional: enable the clk-control debugfs interface via DT */
+	rc = of_property_read_string_index(of_node,
+		"clock-control-debugfs", 0, &clk_control_debugfs);
+	if (rc || !clk_control_debugfs) {
+		CAM_DBG(CAM_UTIL, "No clock_control_debugfs property found");
+		rc = 0;
+		goto end;
+	}
+
+	if (strcmp("true", clk_control_debugfs) == 0)
+		soc_info->clk_control_enable = true;
+
+end:
+	return rc;
+}
+
+/*
+ * Set all of the device's clocks to the rates configured for clk_level
+ * (bumped to the nearest valid level). Stops at the first failure; rates
+ * already applied are not rolled back.
+ */
+int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level)
+{
+	int i, rc = 0;
+	enum cam_vote_level apply_level;
+
+	if ((soc_info->num_clk == 0) ||
+		(soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
+		CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
+			soc_info->num_clk);
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
+		&apply_level);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		rc = cam_soc_util_set_clk_rate(soc_info->clk[i],
+			soc_info->clk_name[i],
+			soc_info->clk_rate[apply_level][i]);
+		if (rc)
+			break;
+	}
+
+	return rc;
+};
+
+/**
+ * cam_soc_util_get_dt_gpio_req_tbl()
+ *
+ * @brief:           Parse the gpio-req-tbl-* DT properties into gconf's
+ *                   request table, mapping table indices to the gpio
+ *                   numbers previously read into gpio_array
+ *
+ * @of_node:         Device node to parse
+ * @gconf:           GPIO data block to fill in
+ * @gpio_array:      GPIO numbers indexed by the DT table entries
+ * @gpio_array_size: Number of valid entries in gpio_array
+ *
+ * @return:          0 on success or when no table is present,
+ *                   negative errno on failure
+ */
+static int cam_soc_util_get_dt_gpio_req_tbl(struct device_node *of_node,
+	struct cam_soc_gpio_data *gconf, uint16_t *gpio_array,
+	uint16_t gpio_array_size)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	/* Absence of the table is not an error */
+	if (!of_get_property(of_node, "gpio-req-tbl-num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+	if (!count) {
+		CAM_ERR(CAM_UTIL, "gpio-req-tbl-num 0");
+		return 0;
+	}
+
+	val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+	if (!val_array)
+		return -ENOMEM;
+
+	gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
+		GFP_KERNEL);
+	if (!gconf->cam_gpio_req_tbl) {
+		rc = -ENOMEM;
+		goto free_val_array;
+	}
+	gconf->cam_gpio_req_tbl_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio-req-tbl-num",
+		val_array, count);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "failed in reading gpio-req-tbl-num, rc = %d",
+			rc);
+		goto free_gpio_req_tbl;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (val_array[i] >= gpio_array_size) {
+			CAM_ERR(CAM_UTIL, "gpio req tbl index %d invalid",
+				val_array[i]);
+			/* Fix: previously jumped to cleanup with rc == 0,
+			 * reporting success to the caller.
+			 */
+			rc = -EINVAL;
+			goto free_gpio_req_tbl;
+		}
+		gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].gpio = %d", i,
+			gconf->cam_gpio_req_tbl[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio-req-tbl-flags",
+		val_array, count);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "Failed in gpio-req-tbl-flags, rc %d", rc);
+		goto free_gpio_req_tbl;
+	}
+
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].flags = %ld", i,
+			gconf->cam_gpio_req_tbl[i].flags);
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"gpio-req-tbl-label", i,
+			&gconf->cam_gpio_req_tbl[i].label);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "Failed rc %d", rc);
+			goto free_gpio_req_tbl;
+		}
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].label = %s", i,
+			gconf->cam_gpio_req_tbl[i].label);
+	}
+
+	kfree(val_array);
+
+	return rc;
+
+free_gpio_req_tbl:
+	kfree(gconf->cam_gpio_req_tbl);
+	/* Fix: clear the dangling pointer so callers cannot double-free */
+	gconf->cam_gpio_req_tbl = NULL;
+free_val_array:
+	kfree(val_array);
+	gconf->cam_gpio_req_tbl_size = 0;
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_get_gpio_info()
+ *
+ * @brief:      Read the device's gpios from DT and populate
+ *              soc_info->gpio_data (common table + request table)
+ *
+ * @soc_info:   Device soc information
+ *
+ * @return:     0 on success (or when the node has no gpios),
+ *              negative errno on failure
+ */
+static int cam_soc_util_get_gpio_info(struct cam_hw_soc_info *soc_info)
+{
+	int32_t rc = 0, i = 0;
+	uint16_t *gpio_array = NULL;
+	int16_t gpio_array_size = 0;
+	struct cam_soc_gpio_data *gconf = NULL;
+	struct device_node *of_node = NULL;
+
+	if (!soc_info || !soc_info->dev)
+		return -EINVAL;
+
+	of_node = soc_info->dev->of_node;
+
+	/* Validate input parameters */
+	if (!of_node) {
+		CAM_ERR(CAM_UTIL, "Invalid param of_node");
+		return -EINVAL;
+	}
+
+	gpio_array_size = of_gpio_count(of_node);
+
+	/* No gpios is a valid configuration */
+	if (gpio_array_size <= 0)
+		return 0;
+
+	CAM_DBG(CAM_UTIL, "gpio count %d", gpio_array_size);
+
+	gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
+	if (!gpio_array) {
+		/* Fix: previously fell through with rc == 0 and reported
+		 * success on allocation failure.
+		 */
+		rc = -ENOMEM;
+		goto free_gpio_conf;
+	}
+
+	/* NOTE(review): of_get_gpio() returns int and can be negative on
+	 * error; storing into uint16_t truncates - confirm acceptable.
+	 */
+	for (i = 0; i < gpio_array_size; i++) {
+		gpio_array[i] = of_get_gpio(of_node, i);
+		CAM_DBG(CAM_UTIL, "gpio_array[%d] = %d", i, gpio_array[i]);
+	}
+
+	gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
+	if (!gconf) {
+		/* Fix: a direct return here used to leak gpio_array */
+		rc = -ENOMEM;
+		goto free_gpio_array;
+	}
+
+	rc = cam_soc_util_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
+		gpio_array_size);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "failed in msm_camera_get_dt_gpio_req_tbl");
+		goto free_gpio_array;
+	}
+
+	gconf->cam_gpio_common_tbl = kcalloc(gpio_array_size,
+		sizeof(struct gpio), GFP_KERNEL);
+	if (!gconf->cam_gpio_common_tbl) {
+		rc = -ENOMEM;
+		/* Fix: previously leaked the request table allocated by
+		 * cam_soc_util_get_dt_gpio_req_tbl() above.
+		 */
+		goto free_gpio_req_tbl;
+	}
+
+	for (i = 0; i < gpio_array_size; i++)
+		gconf->cam_gpio_common_tbl[i].gpio = gpio_array[i];
+
+	gconf->cam_gpio_common_tbl_size = gpio_array_size;
+	soc_info->gpio_data = gconf;
+	kfree(gpio_array);
+
+	return rc;
+
+free_gpio_req_tbl:
+	kfree(gconf->cam_gpio_req_tbl);
+	gconf->cam_gpio_req_tbl = NULL;
+	gconf->cam_gpio_req_tbl_size = 0;
+free_gpio_array:
+	kfree(gpio_array);
+free_gpio_conf:
+	kfree(gconf);
+	soc_info->gpio_data = NULL;
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_request_gpio_table()
+ *
+ * @brief:      Request (gpio_en == true) or free (gpio_en == false) every
+ *              GPIO listed in soc_info->gpio_data->cam_gpio_req_tbl
+ *
+ * @soc_info:   Device soc information
+ * @gpio_en:    true to request the GPIOs, false to free them
+ *
+ * @return:     0 on success (or when no GPIO data is present),
+ *              negative errno on invalid table data
+ */
+static int cam_soc_util_request_gpio_table(
+	struct cam_hw_soc_info *soc_info, bool gpio_en)
+{
+	int rc = 0, i = 0;
+	uint8_t size = 0;
+	struct cam_soc_gpio_data *gpio_conf =
+		soc_info->gpio_data;
+	struct gpio *gpio_tbl = NULL;
+
+
+	/* Devices without GPIOs are legal; nothing to do */
+	if (!gpio_conf) {
+		CAM_DBG(CAM_UTIL, "No GPIO entry");
+		return 0;
+	}
+	if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
+		CAM_ERR(CAM_UTIL, "GPIO table size is invalid");
+		return -EINVAL;
+	}
+	size = gpio_conf->cam_gpio_req_tbl_size;
+	gpio_tbl = gpio_conf->cam_gpio_req_tbl;
+
+	if (!gpio_tbl || !size) {
+		CAM_ERR(CAM_UTIL, "Invalid gpio_tbl %pK / size %d",
+			gpio_tbl, size);
+		return -EINVAL;
+	}
+	for (i = 0; i < size; i++) {
+		CAM_DBG(CAM_UTIL, "i=%d, gpio=%d dir=%ld", i,
+			gpio_tbl[i].gpio, gpio_tbl[i].flags);
+	}
+	if (gpio_en) {
+		for (i = 0; i < size; i++) {
+			rc = gpio_request_one(gpio_tbl[i].gpio,
+				gpio_tbl[i].flags, gpio_tbl[i].label);
+			if (rc) {
+				/*
+				 * After a GPIO request fails, continue to
+				 * apply the remaining gpios; output an error
+				 * message for driver bringup debug
+				 */
+				CAM_ERR(CAM_UTIL, "gpio %d:%s request fails",
+					gpio_tbl[i].gpio, gpio_tbl[i].label);
+			}
+		}
+	} else {
+		gpio_free_array(gpio_tbl, size);
+	}
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_get_dt_regulator_info()
+ *
+ * @brief:      Parse "regulator-names" and, when "rgltr-cntrl-support"
+ *              is set, the per-regulator voltage/load properties into
+ *              soc_info
+ *
+ * @soc_info:   Device soc struct to be populated
+ *
+ * @return:     0 for success, negative value for failure
+ */
+static int cam_soc_util_get_dt_regulator_info
+	(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0, count = 0, i = 0;
+	struct device_node *of_node = NULL;
+
+	if (!soc_info || !soc_info->dev) {
+		CAM_ERR(CAM_UTIL, "Invalid parameters");
+		return -EINVAL;
+	}
+
+	of_node = soc_info->dev->of_node;
+
+	soc_info->num_rgltr = 0;
+	count = of_property_count_strings(of_node, "regulator-names");
+	if (count != -EINVAL) {
+		if (count <= 0) {
+			CAM_ERR(CAM_UTIL, "no regulators found");
+			return -EINVAL;
+		}
+
+		/* Fix: bound the DT count so the fixed-size rgltr_* arrays
+		 * cannot be overrun by a malformed device tree.
+		 */
+		if (count > CAM_SOC_MAX_REGULATOR) {
+			CAM_ERR(CAM_UTIL, "invalid regulator count %d, max %d",
+				count, CAM_SOC_MAX_REGULATOR);
+			return -EINVAL;
+		}
+
+		soc_info->num_rgltr = count;
+
+	} else {
+		/* Property absent entirely: regulators are optional */
+		CAM_DBG(CAM_UTIL, "No regulators node found");
+		return 0;
+	}
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = of_property_read_string_index(of_node,
+			"regulator-names", i, &soc_info->rgltr_name[i]);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "no regulator resource at cnt=%d", i);
+			return -ENODEV;
+		}
+		/* Fix: log only after the read succeeds; rgltr_name[i] is
+		 * uninitialized on failure.
+		 */
+		CAM_DBG(CAM_UTIL, "rgltr_name[%d] = %s",
+			i, soc_info->rgltr_name[i]);
+	}
+
+	if (!of_property_read_bool(of_node, "rgltr-cntrl-support")) {
+		CAM_DBG(CAM_UTIL, "No regulator control parameter defined");
+		soc_info->rgltr_ctrl_support = false;
+		return 0;
+	}
+
+	soc_info->rgltr_ctrl_support = true;
+
+	rc = of_property_read_u32_array(of_node, "rgltr-min-voltage",
+		soc_info->rgltr_min_volt, soc_info->num_rgltr);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "No minimum voltage value found, rc=%d", rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "rgltr-max-voltage",
+		soc_info->rgltr_max_volt, soc_info->num_rgltr);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "No maximum voltage value found, rc=%d", rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "rgltr-load-current",
+		soc_info->rgltr_op_mode, soc_info->num_rgltr);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "No Load current found rc=%d", rc);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_get_dt_properties()
+ *
+ * @brief:      Parse the DT and populate the common properties of
+ *              soc_info: cell-index, register blocks, irq, regulators,
+ *              clocks and gpios
+ *
+ * @soc_info:   Device soc struct to be populated
+ *
+ * @return:     0 for success, negative value for failure
+ */
+int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	int count = 0, i = 0, rc = 0;
+
+	if (!soc_info || !soc_info->dev)
+		return -EINVAL;
+
+	of_node = soc_info->dev->of_node;
+
+	rc = of_property_read_u32(of_node, "cell-index", &soc_info->index);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "device %s failed to read cell-index",
+			soc_info->dev_name);
+		return rc;
+	}
+
+	count = of_property_count_strings(of_node, "reg-names");
+	if (count <= 0) {
+		/* Register blocks are optional for some devices */
+		CAM_DBG(CAM_UTIL, "no reg-names found for: %s",
+			soc_info->dev_name);
+		count = 0;
+	}
+	/* Fix: bound the DT count so the fixed-size mem_block arrays
+	 * cannot be overrun by a malformed device tree.
+	 */
+	if (count > CAM_SOC_MAX_BLOCK) {
+		CAM_ERR(CAM_UTIL, "invalid reg-names count %d, max %d",
+			count, CAM_SOC_MAX_BLOCK);
+		return -EINVAL;
+	}
+	soc_info->num_mem_block = count;
+
+	for (i = 0; i < soc_info->num_mem_block; i++) {
+		rc = of_property_read_string_index(of_node, "reg-names", i,
+			&soc_info->mem_block_name[i]);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "failed to read reg-names at %d", i);
+			return rc;
+		}
+		soc_info->mem_block[i] =
+			platform_get_resource_byname(soc_info->pdev,
+			IORESOURCE_MEM, soc_info->mem_block_name[i]);
+
+		if (!soc_info->mem_block[i]) {
+			CAM_ERR(CAM_UTIL, "no mem resource by name %s",
+				soc_info->mem_block_name[i]);
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	if (soc_info->num_mem_block > 0) {
+		rc = of_property_read_u32_array(of_node, "reg-cam-base",
+			soc_info->mem_block_cam_base, soc_info->num_mem_block);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "Error reading register offsets");
+			return rc;
+		}
+	}
+
+	rc = of_property_read_string_index(of_node, "interrupt-names", 0,
+		&soc_info->irq_name);
+	if (rc) {
+		/* The irq is optional; absence is not a failure */
+		CAM_DBG(CAM_UTIL, "No interrupt line preset for: %s",
+			soc_info->dev_name);
+		rc = 0;
+	} else {
+		soc_info->irq_line =
+			platform_get_resource_byname(soc_info->pdev,
+			IORESOURCE_IRQ, soc_info->irq_name);
+		if (!soc_info->irq_line) {
+			CAM_ERR(CAM_UTIL, "no irq resource");
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	rc = cam_soc_util_get_dt_regulator_info(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_get_dt_clk_info(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_get_gpio_info(soc_info);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_get_regulator()
+ *
+ * @brief:      Get regulator resource by name
+ *
+ * @dev:        Device associated with regulator
+ * @reg:        Return pointer to be filled with regulator on success;
+ *              set to NULL on failure
+ * @rgltr_name: Name of regulator to get
+ *
+ * @return:     0 for Success, negative value for failure
+ */
+static int cam_soc_util_get_regulator(struct device *dev,
+	struct regulator **reg, const char *rgltr_name)
+{
+	int rc = 0;
+	*reg = regulator_get(dev, rgltr_name);
+	if (IS_ERR_OR_NULL(*reg)) {
+		rc = PTR_ERR(*reg);
+		/* regulator_get() may return NULL; map that to -EINVAL */
+		rc = rc ? rc : -EINVAL;
+		CAM_ERR(CAM_UTIL, "Regulator %s get failed %d", rgltr_name, rc);
+		*reg = NULL;
+	}
+	return rc;
+}
+
+/**
+ * cam_soc_util_regulator_disable()
+ *
+ * @brief:          Disable a single regulator, wait the configured delay,
+ *                  then drop its load and voltage votes
+ *
+ * @rgltr:          Regulator to disable
+ * @rgltr_name:     Name used in log messages
+ * @rgltr_min_volt: Minimum voltage (unused here; kept for symmetry with
+ *                  cam_soc_util_regulator_enable)
+ * @rgltr_max_volt: Upper bound used when relaxing the voltage vote to 0
+ * @rgltr_op_mode:  Load current (unused here; kept for symmetry)
+ * @rgltr_delay_ms: Post-disable settle delay in milliseconds
+ *
+ * @return:         0 for success, negative value for failure
+ */
+int cam_soc_util_regulator_disable(struct regulator *rgltr,
+	const char *rgltr_name, uint32_t rgltr_min_volt,
+	uint32_t rgltr_max_volt, uint32_t rgltr_op_mode,
+	uint32_t rgltr_delay_ms)
+{
+	int32_t rc = 0;
+
+	if (!rgltr) {
+		CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
+		return -EINVAL;
+	}
+
+	rc = regulator_disable(rgltr);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "%s regulator disable failed", rgltr_name);
+		return rc;
+	}
+
+	/* msleep() for long delays; usleep_range() is more accurate for
+	 * short ones
+	 */
+	if (rgltr_delay_ms > 20)
+		msleep(rgltr_delay_ms);
+	else if (rgltr_delay_ms)
+		usleep_range(rgltr_delay_ms * 1000,
+			(rgltr_delay_ms * 1000) + 1000);
+
+	/* Relax votes only for regulators that support voltage control */
+	if (regulator_count_voltages(rgltr) > 0) {
+		regulator_set_load(rgltr, 0);
+		regulator_set_voltage(rgltr, 0, rgltr_max_volt);
+	}
+
+	return rc;
+}
+
+
+/**
+ * cam_soc_util_regulator_enable()
+ *
+ * @brief:          Vote voltage and load for a single regulator, enable
+ *                  it, then wait the configured settle delay
+ *
+ * @rgltr:          Regulator to enable
+ * @rgltr_name:     Name used in log messages
+ * @rgltr_min_volt: Minimum voltage to vote
+ * @rgltr_max_volt: Maximum voltage to vote
+ * @rgltr_op_mode:  Load current to vote
+ * @rgltr_delay:    Post-enable settle delay in milliseconds
+ *
+ * @return:         0 for success, negative value for failure
+ */
+int cam_soc_util_regulator_enable(struct regulator *rgltr,
+	const char *rgltr_name,
+	uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+	uint32_t rgltr_op_mode, uint32_t rgltr_delay)
+{
+	int32_t rc = 0;
+
+	if (!rgltr) {
+		CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
+		return -EINVAL;
+	}
+
+	/* Vote only for regulators that support voltage control */
+	if (regulator_count_voltages(rgltr) > 0) {
+		CAM_DBG(CAM_UTIL, "voltage min=%d, max=%d",
+			rgltr_min_volt, rgltr_max_volt);
+
+		rc = regulator_set_voltage(
+			rgltr, rgltr_min_volt, rgltr_max_volt);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "%s set voltage failed", rgltr_name);
+			return rc;
+		}
+
+		rc = regulator_set_load(rgltr, rgltr_op_mode);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "%s set optimum mode failed",
+				rgltr_name);
+			return rc;
+		}
+	}
+
+	rc = regulator_enable(rgltr);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "%s regulator_enable failed", rgltr_name);
+		return rc;
+	}
+
+	/* msleep() for long delays; usleep_range() is more accurate for
+	 * short ones
+	 */
+	if (rgltr_delay > 20)
+		msleep(rgltr_delay);
+	else if (rgltr_delay)
+		usleep_range(rgltr_delay * 1000,
+			(rgltr_delay * 1000) + 1000);
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_request_pinctrl()
+ *
+ * @brief:      Look up the device's pinctrl handle and its active
+ *              ("cam_default") and suspend ("cam_suspend") states
+ *
+ * @soc_info:   Device soc information
+ *
+ * @return:     0 on success (including when pinctrl is absent, which is
+ *              not an error); -EINVAL if a required state is missing
+ */
+static int cam_soc_util_request_pinctrl(
+	struct cam_hw_soc_info *soc_info) {
+
+	struct cam_soc_pinctrl_info *device_pctrl = &soc_info->pinctrl_info;
+	struct device *dev = soc_info->dev;
+
+	device_pctrl->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(device_pctrl->pinctrl)) {
+		/* Pinctrl is optional; record absence and succeed */
+		CAM_DBG(CAM_UTIL, "Pinctrl not available");
+		device_pctrl->pinctrl = NULL;
+		return 0;
+	}
+	device_pctrl->gpio_state_active =
+		pinctrl_lookup_state(device_pctrl->pinctrl,
+			CAM_SOC_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(device_pctrl->gpio_state_active)) {
+		CAM_ERR(CAM_UTIL,
+			"Failed to get the active state pinctrl handle");
+		device_pctrl->gpio_state_active = NULL;
+		return -EINVAL;
+	}
+	device_pctrl->gpio_state_suspend
+		= pinctrl_lookup_state(device_pctrl->pinctrl,
+			CAM_SOC_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(device_pctrl->gpio_state_suspend)) {
+		CAM_ERR(CAM_UTIL,
+			"Failed to get the suspend state pinctrl handle");
+		device_pctrl->gpio_state_suspend = NULL;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * cam_soc_util_regulator_disable_default()
+ *
+ * @brief:      Disable all regulators in soc_info, in reverse of the
+ *              enable order
+ *
+ * @soc_info:   Device soc information
+ */
+static void cam_soc_util_regulator_disable_default(
+	struct cam_hw_soc_info *soc_info)
+{
+	int j = 0;
+	uint32_t num_rgltr = soc_info->num_rgltr;
+
+	for (j = num_rgltr-1; j >= 0; j--) {
+		if (soc_info->rgltr_ctrl_support == true) {
+			/* Also drops voltage/load votes */
+			cam_soc_util_regulator_disable(soc_info->rgltr[j],
+				soc_info->rgltr_name[j],
+				soc_info->rgltr_min_volt[j],
+				soc_info->rgltr_max_volt[j],
+				soc_info->rgltr_op_mode[j],
+				soc_info->rgltr_delay[j]);
+		} else {
+			if (soc_info->rgltr[j])
+				regulator_disable(soc_info->rgltr[j]);
+		}
+	}
+}
+
+/**
+ * cam_soc_util_regulator_enable_default()
+ *
+ * @brief:      Enable all regulators in soc_info in order; on failure,
+ *              disable the ones already enabled (in reverse order)
+ *
+ * @soc_info:   Device soc information
+ *
+ * @return:     0 for success, negative value for failure
+ */
+static int cam_soc_util_regulator_enable_default(
+	struct cam_hw_soc_info *soc_info)
+{
+	int j = 0, rc = 0;
+	uint32_t num_rgltr = soc_info->num_rgltr;
+
+	for (j = 0; j < num_rgltr; j++) {
+		if (soc_info->rgltr_ctrl_support == true) {
+			rc = cam_soc_util_regulator_enable(soc_info->rgltr[j],
+				soc_info->rgltr_name[j],
+				soc_info->rgltr_min_volt[j],
+				soc_info->rgltr_max_volt[j],
+				soc_info->rgltr_op_mode[j],
+				soc_info->rgltr_delay[j]);
+		} else {
+			if (soc_info->rgltr[j])
+				rc = regulator_enable(soc_info->rgltr[j]);
+		}
+
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "%s enable failed",
+				soc_info->rgltr_name[j]);
+			goto disable_rgltr;
+		}
+	}
+
+	return rc;
+disable_rgltr:
+
+	/* Unwind: j is the failed index, so start at j - 1 */
+	for (j--; j >= 0; j--) {
+		if (soc_info->rgltr_ctrl_support == true) {
+			cam_soc_util_regulator_disable(soc_info->rgltr[j],
+				soc_info->rgltr_name[j],
+				soc_info->rgltr_min_volt[j],
+				soc_info->rgltr_max_volt[j],
+				soc_info->rgltr_op_mode[j],
+				soc_info->rgltr_delay[j]);
+		} else {
+			if (soc_info->rgltr[j])
+				regulator_disable(soc_info->rgltr[j]);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_request_platform_resource()
+ *
+ * @brief:      Map register blocks and request regulator, irq, clock,
+ *              pinctrl and gpio resources described in soc_info
+ *
+ * @soc_info:   Device soc information
+ * @handler:    Irq handler function pointer
+ * @irq_data:   Irq handler function CB data
+ *
+ * @return:     0 for success, negative value for failure
+ */
+int cam_soc_util_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t handler, void *irq_data)
+{
+	int i = 0, rc = 0;
+
+	if (!soc_info || !soc_info->dev) {
+		CAM_ERR(CAM_UTIL, "Invalid parameters");
+		return -EINVAL;
+	}
+
+	/* Map all register blocks parsed from DT */
+	for (i = 0; i < soc_info->num_mem_block; i++) {
+		if (soc_info->reserve_mem) {
+			if (!request_mem_region(soc_info->mem_block[i]->start,
+				resource_size(soc_info->mem_block[i]),
+				soc_info->mem_block_name[i])){
+				CAM_ERR(CAM_UTIL,
+					"Error Mem region request Failed:%s",
+					soc_info->mem_block_name[i]);
+				rc = -ENOMEM;
+				goto unmap_base;
+			}
+		}
+		soc_info->reg_map[i].mem_base = ioremap(
+			soc_info->mem_block[i]->start,
+			resource_size(soc_info->mem_block[i]));
+		if (!soc_info->reg_map[i].mem_base) {
+			CAM_ERR(CAM_UTIL, "i= %d base NULL", i);
+			rc = -ENOMEM;
+			goto unmap_base;
+		}
+		soc_info->reg_map[i].mem_cam_base =
+			soc_info->mem_block_cam_base[i];
+		soc_info->reg_map[i].size =
+			resource_size(soc_info->mem_block[i]);
+		soc_info->num_reg_map++;
+	}
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		if (soc_info->rgltr_name[i] == NULL) {
+			CAM_ERR(CAM_UTIL, "can't find regulator name");
+			/* Fix: previously jumped to cleanup with rc == 0,
+			 * reporting success to the caller.
+			 */
+			rc = -EINVAL;
+			goto put_regulator;
+		}
+
+		rc = cam_soc_util_get_regulator(soc_info->dev,
+			&soc_info->rgltr[i],
+			soc_info->rgltr_name[i]);
+		if (rc)
+			goto put_regulator;
+	}
+
+	if (soc_info->irq_line) {
+		rc = devm_request_irq(soc_info->dev, soc_info->irq_line->start,
+			handler, IRQF_TRIGGER_RISING,
+			soc_info->irq_name, irq_data);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "irq request fail");
+			rc = -EBUSY;
+			goto put_regulator;
+		}
+		/* Leave the irq off until the HW block enables it */
+		disable_irq(soc_info->irq_line->start);
+		soc_info->irq_data = irq_data;
+	}
+
+	/* Get Clock */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = clk_get(soc_info->dev,
+			soc_info->clk_name[i]);
+		/* Fix: clk_get() reports failure via ERR_PTR(), not NULL,
+		 * so the previous '!clk' test let errors through.
+		 */
+		if (IS_ERR_OR_NULL(soc_info->clk[i])) {
+			CAM_ERR(CAM_UTIL, "get failed for %s",
+				soc_info->clk_name[i]);
+			rc = -ENOENT;
+			soc_info->clk[i] = NULL;
+			goto put_clk;
+		}
+	}
+
+	rc = cam_soc_util_request_pinctrl(soc_info);
+	if (rc)
+		CAM_DBG(CAM_UTIL, "Failed in request pinctrl, rc=%d", rc);
+
+	rc = cam_soc_util_request_gpio_table(soc_info, true);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "Failed in request gpio table, rc=%d", rc);
+		goto put_clk;
+	}
+
+	if (soc_info->clk_control_enable)
+		cam_soc_util_create_clk_lvl_debugfs(soc_info);
+
+	return rc;
+
+	/*
+	 * Each cleanup loop below leaves i == -1 on exit; the following
+	 * section detects that and resets i to its own full count so
+	 * fall-through releases everything acquired in earlier stages.
+	 */
+put_clk:
+	if (i == -1)
+		i = soc_info->num_clk;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->clk[i]) {
+			clk_put(soc_info->clk[i]);
+			soc_info->clk[i] = NULL;
+		}
+	}
+
+	if (soc_info->irq_line) {
+		disable_irq(soc_info->irq_line->start);
+		devm_free_irq(soc_info->dev,
+			soc_info->irq_line->start, irq_data);
+	}
+
+put_regulator:
+	if (i == -1)
+		i = soc_info->num_rgltr;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i]) {
+			regulator_disable(soc_info->rgltr[i]);
+			regulator_put(soc_info->rgltr[i]);
+			soc_info->rgltr[i] = NULL;
+		}
+	}
+
+unmap_base:
+	if (i == -1)
+		i = soc_info->num_reg_map;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->reserve_mem)
+			release_mem_region(soc_info->mem_block[i]->start,
+				resource_size(soc_info->mem_block[i]));
+		iounmap(soc_info->reg_map[i].mem_base);
+		soc_info->reg_map[i].mem_base = NULL;
+		soc_info->reg_map[i].size = 0;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_release_platform_resource()
+ *
+ * @brief:      Release everything acquired by
+ *              cam_soc_util_request_platform_resource(): clocks,
+ *              regulators, register mappings, irq, pinctrl and gpios
+ *
+ * @soc_info:   Device soc information
+ *
+ * @return:     0 for success, negative value for failure
+ */
+int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+
+	if (!soc_info || !soc_info->dev) {
+		CAM_ERR(CAM_UTIL, "Invalid parameter");
+		return -EINVAL;
+	}
+
+	/* Release in reverse of the acquisition order */
+	for (i = soc_info->num_clk - 1; i >= 0; i--) {
+		clk_put(soc_info->clk[i]);
+		soc_info->clk[i] = NULL;
+	}
+
+	for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i]) {
+			regulator_put(soc_info->rgltr[i]);
+			soc_info->rgltr[i] = NULL;
+		}
+	}
+
+	for (i = soc_info->num_reg_map - 1; i >= 0; i--) {
+		iounmap(soc_info->reg_map[i].mem_base);
+		soc_info->reg_map[i].mem_base = NULL;
+		soc_info->reg_map[i].size = 0;
+	}
+
+	if (soc_info->irq_line) {
+		disable_irq(soc_info->irq_line->start);
+		devm_free_irq(soc_info->dev,
+			soc_info->irq_line->start, soc_info->irq_data);
+	}
+
+	if (soc_info->pinctrl_info.pinctrl)
+		devm_pinctrl_put(soc_info->pinctrl_info.pinctrl);
+
+
+	/* release for gpio */
+	cam_soc_util_request_gpio_table(soc_info, false);
+
+	if (soc_info->clk_control_enable)
+		cam_soc_util_remove_clk_lvl_debugfs(soc_info);
+
+	return 0;
+}
+
+/**
+ * cam_soc_util_enable_platform_resource()
+ *
+ * @brief:          Power up the device: regulators first, then optionally
+ *                  clocks and irq, then the active pinctrl state; unwinds
+ *                  in reverse on any failure
+ *
+ * @soc_info:       Device soc information
+ * @enable_clocks:  true to enable all clocks now at clk_level
+ * @clk_level:      Clock vote level (used only if enable_clocks is true)
+ * @enable_irq:     true to enable the irq now
+ *
+ * @return:         0 for success, negative value for failure
+ */
+int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool enable_clocks, enum cam_vote_level clk_level, bool enable_irq)
+{
+	int rc = 0;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	rc = cam_soc_util_regulator_enable_default(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "Regulators enable failed");
+		return rc;
+	}
+
+	if (enable_clocks) {
+		rc = cam_soc_util_clk_enable_default(soc_info, clk_level);
+		if (rc)
+			goto disable_regulator;
+	}
+
+	if (enable_irq) {
+		rc = cam_soc_util_irq_enable(soc_info);
+		if (rc)
+			goto disable_clk;
+	}
+
+	if (soc_info->pinctrl_info.pinctrl &&
+		soc_info->pinctrl_info.gpio_state_active) {
+		rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
+			soc_info->pinctrl_info.gpio_state_active);
+
+		if (rc)
+			goto disable_irq;
+	}
+
+	return rc;
+
+	/* Unwind in reverse order of enabling */
+disable_irq:
+	if (enable_irq)
+		cam_soc_util_irq_disable(soc_info);
+
+disable_clk:
+	if (enable_clocks)
+		cam_soc_util_clk_disable_default(soc_info);
+
+disable_regulator:
+	cam_soc_util_regulator_disable_default(soc_info);
+
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_disable_platform_resource()
+ *
+ * @brief:              Power down the device: optionally irq and clocks,
+ *                      always regulators, then the suspend pinctrl state
+ *
+ * @soc_info:           Device soc information
+ * @disable_clocks:     true to disable all clocks now
+ * @disable_irq:        true to disable the irq now
+ *
+ * @return:             0 for success, negative value for failure
+ */
+int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq)
+{
+	int rc = 0;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	/* Accumulate, but keep going: best-effort teardown */
+	if (disable_irq)
+		rc |= cam_soc_util_irq_disable(soc_info);
+
+	if (disable_clocks)
+		cam_soc_util_clk_disable_default(soc_info);
+
+	cam_soc_util_regulator_disable_default(soc_info);
+
+	if (soc_info->pinctrl_info.pinctrl &&
+		soc_info->pinctrl_info.gpio_state_suspend)
+		rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
+			soc_info->pinctrl_info.gpio_state_suspend);
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_reg_dump()
+ *
+ * @brief:       Dump `size` register words starting at `offset` within
+ *               the mapped register block `base_index`
+ *
+ * @soc_info:    Device soc information
+ * @base_index:  Index into soc_info->reg_map
+ * @offset:      Byte offset within the mapped block
+ * @size:        Amount to dump
+ *
+ * @return:      0 for success, -EINVAL on bad arguments
+ */
+int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, int size)
+{
+	void __iomem *base_addr = NULL;
+
+	CAM_DBG(CAM_UTIL, "base_idx %u size=%d", base_index, size);
+
+	/* NOTE(review): '>=' rejects a dump ending exactly at the map
+	 * boundary (offset + size == map size) - confirm off-by-one intent.
+	 */
+	if (!soc_info || base_index >= soc_info->num_reg_map ||
+		size <= 0 || (offset + size) >=
+		CAM_SOC_GET_REG_MAP_SIZE(soc_info, base_index))
+		return -EINVAL;
+
+	base_addr = CAM_SOC_GET_REG_MAP_START(soc_info, base_index);
+
+	/*
+	 * All error checking already done above,
+	 * hence ignoring the return value below.
+	 */
+	cam_io_dump(base_addr, offset, size);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera_v3/cam_utils/cam_soc_util.h
new file mode 100644
index 000000000000..91cc50daea83
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_soc_util.h
@@ -0,0 +1,653 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SOC_UTIL_H_
+#define _CAM_SOC_UTIL_H_
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk/qcom.h>
+#include <linux/debugfs.h>
+
+#include "cam_io_util.h"
+
+#define NO_SET_RATE -1
+#define INIT_RATE -2
+
+/* maximum number of device block */
+#define CAM_SOC_MAX_BLOCK 4
+
+/* maximum number of device base */
+#define CAM_SOC_MAX_BASE CAM_SOC_MAX_BLOCK
+
+/* maximum number of device regulator */
+#define CAM_SOC_MAX_REGULATOR 5
+
+/* maximum number of device clock */
+#define CAM_SOC_MAX_CLK 32
+
+/* soc id */
+#define SDM670_SOC_ID 336
+#define SDM710_SOC_ID 360
+#define SDM712_SOC_ID 393
+
+/* Minor Version */
+#define SDM670_V1_1 0x1
+/**
+ * enum cam_vote_level - Enum for voting level
+ *
+ * (String forms of these levels are resolved by
+ * cam_soc_util_get_level_from_string().)
+ *
+ * @CAM_SUSPEND_VOTE : Suspend vote
+ * @CAM_MINSVS_VOTE : Min SVS vote
+ * @CAM_LOWSVS_VOTE : Low SVS vote
+ * @CAM_SVS_VOTE : SVS vote
+ * @CAM_SVSL1_VOTE : SVS Plus vote
+ * @CAM_NOMINAL_VOTE : Nominal vote
+ * @CAM_NOMINALL1_VOTE: Nominal plus vote
+ * @CAM_TURBO_VOTE : Turbo vote
+ * @CAM_MAX_VOTE : Max voting level; invalid as a level, used to size
+ *                 per-level arrays (e.g. clk_rate, clk_level_valid)
+ */
+enum cam_vote_level {
+	CAM_SUSPEND_VOTE,
+	CAM_MINSVS_VOTE,
+	CAM_LOWSVS_VOTE,
+	CAM_SVS_VOTE,
+	CAM_SVSL1_VOTE,
+	CAM_NOMINAL_VOTE,
+	CAM_NOMINALL1_VOTE,
+	CAM_TURBO_VOTE,
+	CAM_MAX_VOTE,
+};
+
+/* pinctrl states */
+#define CAM_SOC_PINCTRL_STATE_SLEEP "cam_suspend"
+#define CAM_SOC_PINCTRL_STATE_DEFAULT "cam_default"
+
+/**
+ * struct cam_soc_reg_map: Information about the mapped register space
+ *
+ * @mem_base:     Starting location of MAPPED register space
+ *                (kernel virtual address from ioremap)
+ * @mem_cam_base: Starting offset of this register space compared
+ *                to ENTIRE Camera register space
+ * @size:         Size of register space
+ **/
+struct cam_soc_reg_map {
+	void __iomem *mem_base;
+	uint32_t mem_cam_base;
+	resource_size_t size;
+};
+
+/**
+ * struct cam_soc_pinctrl_info: Information about pinctrl data
+ *
+ * @pinctrl:            pinctrl object
+ * @gpio_state_active:  default (active) pinctrl state
+ * @gpio_state_suspend: suspend state of pinctrl
+ **/
+struct cam_soc_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+};
+
+/**
+ * struct cam_soc_gpio_data: Information about the gpio pins
+ *
+ * @cam_gpio_common_tbl:      List of all the gpios present in the gpios
+ *                            node in DTSI
+ * @cam_gpio_common_tbl_size: Number of gpios present in the gpios node
+ *                            in DTSI
+ * @cam_gpio_req_tbl:         List of all the requested gpios
+ * @cam_gpio_req_tbl_size:    Size of requested gpios table
+ **/
+struct cam_soc_gpio_data {
+	struct gpio *cam_gpio_common_tbl;
+	uint8_t cam_gpio_common_tbl_size;
+	struct gpio *cam_gpio_req_tbl;
+	uint8_t cam_gpio_req_tbl_size;
+};
+
+/**
+ * struct cam_hw_soc_info: Soc information pertaining to specific instance of
+ * Camera hardware driver module
+ *
+ * @pdev:               Platform device pointer
+ * @dev:                Device pointer
+ * @hw_version:         Camera device version
+ * @index:              Instance id for the camera device
+ * @dev_name:           Device Name
+ * @irq_name:           Name of the irq associated with the device
+ * @irq_line:           Irq resource
+ * @irq_data:           Private data that is passed when IRQ is requested
+ * @num_mem_block:      Number of entry in the "reg-names"
+ * @mem_block_name:     Array of the reg block name
+ * @mem_block_cam_base: Array of offset of this register space compared
+ *                      to ENTIRE Camera register space
+ * @mem_block:          Associated resource structs
+ * @reg_map:            Array of Mapped register info for the "reg-names"
+ * @num_reg_map:        Number of mapped register space associated
+ *                      with mem_block. num_reg_map = num_mem_block in
+ *                      most cases
+ * @reserve_mem:        Whether to reserve memory for Mem blocks
+ * @num_rgltr:          Number of regulators
+ * @rgltr_name:         Array of regulator names
+ * @rgltr_ctrl_support: Whether regulator control is supported
+ * @rgltr_min_volt:     Array of minimum regulator voltage
+ * @rgltr_max_volt:     Array of maximum regulator voltage
+ * @rgltr_op_mode:      Array of regulator operation mode (load current)
+ * @rgltr_type:         Array of regulator types
+ * @rgltr:              Array of associated regulator resources
+ * @rgltr_delay:        Array of regulator delay values
+ * @use_shared_clk:     Whether shared clocks are used for this device
+ * @num_clk:            Number of clocks
+ * @clk_name:           Array of clock names
+ * @clk:                Array of associated clock resources
+ * @clk_rate:           2D array of clock rates representing clock rate
+ *                      values at different vote levels
+ * @src_clk_idx:        Source clock index that is rate-controllable
+ * @clk_level_valid:    Indicates whether corresponding level is valid
+ * @gpio_data:          Pointer to gpio info
+ * @pinctrl_info:       Pointer to pinctrl info
+ * @dentry:             Debugfs entry
+ * @clk_level_override: Clk level set from debugfs
+ * @clk_control_enable: Enable/disable clk rate control through debugfs
+ * @soc_private:        Soc private data
+ */
+struct cam_hw_soc_info {
+	struct platform_device *pdev;
+	struct device *dev;
+	uint32_t hw_version;
+	uint32_t index;
+	const char *dev_name;
+	const char *irq_name;
+	struct resource *irq_line;
+	void *irq_data;
+
+	uint32_t num_mem_block;
+	const char *mem_block_name[CAM_SOC_MAX_BLOCK];
+	uint32_t mem_block_cam_base[CAM_SOC_MAX_BLOCK];
+	struct resource *mem_block[CAM_SOC_MAX_BLOCK];
+	struct cam_soc_reg_map reg_map[CAM_SOC_MAX_BASE];
+	uint32_t num_reg_map;
+	uint32_t reserve_mem;
+
+	uint32_t num_rgltr;
+	const char *rgltr_name[CAM_SOC_MAX_REGULATOR];
+	uint32_t rgltr_ctrl_support;
+	uint32_t rgltr_min_volt[CAM_SOC_MAX_REGULATOR];
+	uint32_t rgltr_max_volt[CAM_SOC_MAX_REGULATOR];
+	uint32_t rgltr_op_mode[CAM_SOC_MAX_REGULATOR];
+	uint32_t rgltr_type[CAM_SOC_MAX_REGULATOR];
+	struct regulator *rgltr[CAM_SOC_MAX_REGULATOR];
+	uint32_t rgltr_delay[CAM_SOC_MAX_REGULATOR];
+
+	uint32_t use_shared_clk;
+	uint32_t num_clk;
+	const char *clk_name[CAM_SOC_MAX_CLK];
+	struct clk *clk[CAM_SOC_MAX_CLK];
+	int32_t clk_rate[CAM_MAX_VOTE][CAM_SOC_MAX_CLK];
+	int32_t src_clk_idx;
+	bool clk_level_valid[CAM_MAX_VOTE];
+
+	struct cam_soc_gpio_data *gpio_data;
+	struct cam_soc_pinctrl_info pinctrl_info;
+
+	struct dentry *dentry;
+	uint32_t clk_level_override;
+	bool clk_control_enable;
+
+	void *soc_private;
+};
+
+/*
+ * CAM_SOC_GET_REG_MAP_START
+ *
+ * @brief:              This MACRO will get the mapped starting address
+ *                      where the register space can be accessed
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns a pointer to the mapped register memory
+ *
+ * Note: arguments are evaluated more than once; avoid passing
+ * expressions with side effects.
+ */
+#define CAM_SOC_GET_REG_MAP_START(__soc_info, __base_index)    \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ? \
+		NULL : __soc_info->reg_map[__base_index].mem_base)
+
+/*
+ * CAM_SOC_GET_REG_MAP_CAM_BASE
+ *
+ * @brief:              This MACRO will get the cam_base of the
+ *                      register space
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns an int32_t value.
+ *                        Failure: -1
+ *                        Success: Starting offset of register space compared
+ *                                 to entire Camera Register Map
+ *
+ * Note: arguments are evaluated more than once; avoid passing
+ * expressions with side effects.
+ */
+#define CAM_SOC_GET_REG_MAP_CAM_BASE(__soc_info, __base_index) \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ? \
+		-1 : __soc_info->reg_map[__base_index].mem_cam_base)
+
+/*
+ * CAM_SOC_GET_REG_MAP_SIZE
+ *
+ * @brief:              This MACRO will get the size of the mapped
+ *                      register space
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns a uint32_t value.
+ *                        Failure: 0
+ *                        Success: Non-zero size of mapped register space
+ *
+ * Note: arguments are evaluated more than once; avoid passing
+ * expressions with side effects.
+ */
+#define CAM_SOC_GET_REG_MAP_SIZE(__soc_info, __base_index)     \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ? \
+		0 : __soc_info->reg_map[__base_index].size)
+
+/**
+ * cam_soc_util_get_level_from_string()
+ *
+ * @brief: Get the associated vote level for the input string
+ *
+ * @string: Input string to compare with.
+ * @level: Vote level corresponds to input string.
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_get_level_from_string(const char *string,
+ enum cam_vote_level *level);
+
+/**
+ * cam_soc_util_get_dt_properties()
+ *
+ * @brief: Parse the DT and populate the common properties that
+ * are part of the soc_info structure - register map,
+ * clocks, regulators, irq, etc.
+ *
+ * @soc_info: Device soc struct to be populated
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_request_platform_resource()
+ *
+ * @brief: Request regulator, irq, and clock resources
+ *
+ * @soc_info: Device soc information
+ * @handler: Irq handler function pointer
+ * @irq_data: Irq handler function CB data
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+ irq_handler_t handler, void *irq_data);
+
+/**
+ * cam_soc_util_release_platform_resource()
+ *
+ * @brief: Release regulator, irq, and clock resources
+ *
+ * @soc_info: Device soc information
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_enable_platform_resource()
+ *
+ * @brief: Enable regulator, irq resources
+ *
+ * @soc_info: Device soc information
+ * @enable_clocks: Boolean flag:
+ * TRUE: Enable all clocks in soc_info Now.
+ * False: Don't enable clocks Now. Driver will
+ * enable independently.
+ * @clk_level: Clock level to be applied.
+ * Applicable only if enable_clocks is true
+ * Valid range : 0 to (CAM_MAX_VOTE - 1)
+ * @enable_irq: Boolean flag:
+ * TRUE: Enable IRQ in soc_info Now.
+ * False: Don't enable IRQ Now. Driver will
+ * enable independently.
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
+ bool enable_clocks, enum cam_vote_level clk_level, bool enable_irq);
+
+/**
+ * cam_soc_util_disable_platform_resource()
+ *
+ * @brief:              Disable regulator, irq resources
+ *
+ * @soc_info:           Device soc information
+ * @disable_clocks:     Boolean flag:
+ *                          TRUE: Disable all clocks in soc_info Now.
+ *                          False: Don't disable clocks Now. Driver will
+ *                                 disable independently.
+ * @disable_irq:        Boolean flag:
+ *                          TRUE: Disable IRQ in soc_info Now.
+ *                          False: Don't disable IRQ Now. Driver will
+ *                                 disable independently.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
+ bool disable_clocks, bool disable_irq);
+
+/**
+ * cam_soc_util_get_clk_round_rate()
+ *
+ * @brief: Get the rounded clock rate for the given clock's
+ * clock rate value
+ *
+ * @soc_info: Device soc information
+ * @clk_index: Clock index in soc_info for which round rate is needed
+ * @clk_rate: Input clock rate for which rounded rate is needed
+ *
+ * @return: Rounded clock rate
+ */
+long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info,
+ uint32_t clk_index, unsigned long clk_rate);
+
+/**
+ * cam_soc_util_set_clk_flags()
+ *
+ * @brief: Camera SOC util to set the flags for a specified clock
+ *
+ * @soc_info: Device soc information
+ * @clk_index: Clock index in soc_info for which flags are to be set
+ * @flags: Flags to set
+ *
+ * @return: Success or Failure
+ */
+int cam_soc_util_set_clk_flags(struct cam_hw_soc_info *soc_info,
+ uint32_t clk_index, unsigned long flags);
+
+/**
+ * cam_soc_util_set_src_clk_rate()
+ *
+ * @brief: Set the rate on the source clock.
+ *
+ * @soc_info: Device soc information
+ * @clk_rate: Clock rate associated with the src clk
+ *
+ * @return: success or failure
+ */
+int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info,
+ int32_t clk_rate);
+
+/**
+ * cam_soc_util_get_option_clk_by_name()
+ *
+ * @brief: Get reference to optional clk using name
+ *
+ * @soc_info: Device soc information
+ * @clk_name: Name of clock to find reference for
+ * @clk: Clock reference pointer to be filled if Success
+ * @clk_index: Clk index in the option clk array to be returned
+ * @clk_rate: Clk rate in the option clk array
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info,
+ const char *clk_name, struct clk **clk, int32_t *clk_index,
+ int32_t *clk_rate);
+
+/**
+ * cam_soc_util_clk_put()
+ *
+ * @brief: Put clock specified in params
+ *
+ * @clk: Reference to the Clock that needs to be put
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_clk_put(struct clk **clk);
+
+/**
+ * cam_soc_util_clk_enable()
+ *
+ * @brief: Enable clock specified in params
+ *
+ * @clk: Clock that needs to be turned ON
+ * @clk_name: Clocks name associated with clk
+ * @clk_rate: Clocks rate associated with clk
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+ int32_t clk_rate);
+
+/**
+ * cam_soc_util_set_clk_rate_level()
+ *
+ * @brief: Apply clock rates for the requested level.
+ * This applies the new requested level for all
+ * the clocks listed in DT based on their values.
+ *
+ * @soc_info: Device soc information
+ * @clk_level: Clock level number to set
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level clk_level);
+
+/**
+ * cam_soc_util_clk_disable()
+ *
+ * @brief: Disable clock specified in params
+ *
+ * @clk: Clock that needs to be turned OFF
+ * @clk_name: Clocks name associated with clk
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name);
+
+/**
+ * cam_soc_util_irq_enable()
+ *
+ * @brief: Enable IRQ in SOC
+ *
+ * @soc_info: Device soc information
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_irq_disable()
+ *
+ * @brief: Disable IRQ in SOC
+ *
+ * @soc_info: Device soc information
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_regulator_enable()
+ *
+ * @brief: Enable single regulator
+ *
+ * @rgltr Regulator that needs to be turned ON
+ * @rgltr_name Associated Regulator name
+ * @rgltr_min_volt: Requested minimum voltage
+ * @rgltr_max_volt: Requested maximum voltage
+ * @rgltr_op_mode: Requested Load
+ * @rgltr_delay: Requested delay needed after enabling regulator
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_regulator_enable(struct regulator *rgltr,
+ const char *rgltr_name,
+ uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+ uint32_t rgltr_op_mode, uint32_t rgltr_delay);
+
+/**
+ * cam_soc_util_regulator_disable()
+ *
+ * @brief: Disable single regulator
+ *
+ * @rgltr Regulator that needs to be turned OFF
+ * @rgltr_name Associated Regulator name
+ * @rgltr_min_volt: Requested minimum voltage
+ * @rgltr_max_volt: Requested maximum voltage
+ * @rgltr_op_mode: Requested Load
+ * @rgltr_delay: Requested delay needed after disabling regulator
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_regulator_disable(struct regulator *rgltr,
+ const char *rgltr_name,
+ uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+ uint32_t rgltr_op_mode, uint32_t rgltr_delay);
+
+
+/**
+ * cam_soc_util_w()
+ *
+ * @brief: Camera SOC util for register write
+ *
+ * @soc_info: Device soc information
+ * @base_index: Index of register space in the HW block
+ * @offset: Offset of register to be written
+ * @data: Value to be written
+ *
+ * @return: Success or Failure
+ */
+static inline int cam_soc_util_w(struct cam_hw_soc_info *soc_info,
+ uint32_t base_index, uint32_t offset, uint32_t data)
+{
+ if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+ return -EINVAL;
+ return cam_io_w(data,
+ CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_w_mb()
+ *
+ * @brief: Camera SOC util for register write with memory barrier.
+ * Memory Barrier is only before the write to ensure the
+ * order. If need to ensure this write is also flushed
+ * call wmb() independently in the caller.
+ *
+ * @soc_info: Device soc information
+ * @base_index: Index of register space in the HW block
+ * @offset: Offset of register to be written
+ * @data: Value to be written
+ *
+ * @return: Success or Failure
+ */
+static inline int cam_soc_util_w_mb(struct cam_hw_soc_info *soc_info,
+ uint32_t base_index, uint32_t offset, uint32_t data)
+{
+ if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+ return -EINVAL;
+ return cam_io_w_mb(data,
+ CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_r()
+ *
+ * @brief: Camera SOC util for register read
+ *
+ * @soc_info: Device soc information
+ * @base_index: Index of register space in the HW block
+ * @offset: Offset of register to be read
+ *
+ * @return: Value read from the register address
+ */
+static inline uint32_t cam_soc_util_r(struct cam_hw_soc_info *soc_info,
+ uint32_t base_index, uint32_t offset)
+{
+ if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+ return 0;
+ return cam_io_r(
+ CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_r_mb()
+ *
+ * @brief: Camera SOC util for register read with memory barrier.
+ * Memory Barrier is only before the read to ensure the
+ * order. If need to ensure this read is also completed
+ * call rmb() independently in the caller.
+ *
+ * @soc_info: Device soc information
+ * @base_index: Index of register space in the HW block
+ * @offset: Offset of register to be read
+ *
+ * @return: Value read from the register address
+ */
+static inline uint32_t cam_soc_util_r_mb(struct cam_hw_soc_info *soc_info,
+ uint32_t base_index, uint32_t offset)
+{
+ if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+ return 0;
+ return cam_io_r_mb(
+ CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_reg_dump()
+ *
+ * @brief: Camera SOC util for dumping a range of register
+ *
+ * @soc_info: Device soc information
+ * @base_index: Index of register space in the HW block
+ * @offset: Start register offset for the dump
+ * @size: Size specifying the range for dump
+ *
+ * @return: Success or Failure
+ */
+int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
+ uint32_t base_index, uint32_t offset, int size);
+
+void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info);
+
+int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level clk_level);
+/**
+ * cam_soc_util_get_soc_id()
+ *
+ * @brief: Read soc id
+ *
+ * @return SOC id
+ */
+uint32_t cam_soc_util_get_soc_id(void);
+
+/**
+ * cam_soc_util_get_hw_revision_node()
+ *
+ * @brief: Camera HW ID
+ *
+ * @soc_info: Device soc information
+ *
+ * @return HW id
+ */
+uint32_t cam_soc_util_get_hw_revision_node(struct cam_hw_soc_info *soc_info);
+#endif /* _CAM_SOC_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_trace.c b/drivers/media/platform/msm/camera_v3/cam_utils/cam_trace.c
new file mode 100644
index 000000000000..99bcb32a9811
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_trace.c
@@ -0,0 +1,16 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "cam_trace.h"
diff --git a/drivers/media/platform/msm/camera_v3/cam_utils/cam_trace.h b/drivers/media/platform/msm/camera_v3/cam_utils/cam_trace.h
new file mode 100644
index 000000000000..c8b1fb00c124
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v3/cam_utils/cam_trace.h
@@ -0,0 +1,309 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_CAM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _CAM_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM camera
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cam_trace
+
+#include <linux/tracepoint.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_core.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_context.h"
+
+TRACE_EVENT(cam_context_state,
+ TP_PROTO(const char *name, struct cam_context *ctx),
+ TP_ARGS(name, ctx),
+ TP_STRUCT__entry(
+ __field(void*, ctx)
+ __field(uint32_t, state)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->state = ctx->state;
+ __assign_str(name, name);
+ ),
+ TP_printk(
+ "%s: State ctx=%p ctx_state=%u",
+ __get_str(name), __entry->ctx, __entry->state
+ )
+);
+
+TRACE_EVENT(cam_isp_activated_irq,
+ TP_PROTO(struct cam_context *ctx, unsigned int substate,
+ unsigned int event, uint64_t timestamp),
+ TP_ARGS(ctx, substate, event, timestamp),
+ TP_STRUCT__entry(
+ __field(void*, ctx)
+ __field(uint32_t, state)
+ __field(uint32_t, substate)
+ __field(uint32_t, event)
+ __field(uint64_t, ts)
+ ),
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->state = ctx->state;
+ __entry->substate = substate;
+ __entry->event = event;
+ __entry->ts = timestamp;
+ ),
+ TP_printk(
+ "ISP: IRQ ctx=%p ctx_state=%u substate=%u event=%u ts=%llu",
+ __entry->ctx, __entry->state, __entry->substate,
+ __entry->event, __entry->ts
+ )
+);
+
+TRACE_EVENT(cam_icp_fw_dbg,
+ TP_PROTO(char *dbg_message, uint64_t timestamp),
+ TP_ARGS(dbg_message, timestamp),
+ TP_STRUCT__entry(
+ __string(dbg_message, dbg_message)
+ __field(uint64_t, timestamp)
+ ),
+ TP_fast_assign(
+ __assign_str(dbg_message, dbg_message);
+ __entry->timestamp = timestamp;
+ ),
+ TP_printk(
+ "%llu %s: ",
+ __entry->timestamp, __get_str(dbg_message)
+ )
+);
+
+TRACE_EVENT(cam_buf_done,
+ TP_PROTO(const char *ctx_type, struct cam_context *ctx,
+ struct cam_ctx_request *req),
+ TP_ARGS(ctx_type, ctx, req),
+ TP_STRUCT__entry(
+ __string(ctx_type, ctx_type)
+ __field(void*, ctx)
+ __field(uint64_t, request)
+ ),
+ TP_fast_assign(
+ __assign_str(ctx_type, ctx_type);
+ __entry->ctx = ctx;
+ __entry->request = req->request_id;
+ ),
+ TP_printk(
+ "%5s: BufDone ctx=%p request=%llu",
+ __get_str(ctx_type), __entry->ctx, __entry->request
+ )
+);
+
+TRACE_EVENT(cam_apply_req,
+ TP_PROTO(const char *entity, uint64_t req_id),
+ TP_ARGS(entity, req_id),
+ TP_STRUCT__entry(
+ __string(entity, entity)
+ __field(uint64_t, req_id)
+ ),
+ TP_fast_assign(
+ __assign_str(entity, entity);
+ __entry->req_id = req_id;
+ ),
+ TP_printk(
+ "%8s: ApplyRequest request=%llu",
+ __get_str(entity), __entry->req_id
+ )
+);
+
+TRACE_EVENT(cam_flush_req,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_flush_info *info),
+ TP_ARGS(link, info),
+ TP_STRUCT__entry(
+ __field(uint32_t, type)
+ __field(int64_t, req_id)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __entry->type = info->flush_type;
+ __entry->req_id = info->req_id;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "FlushRequest type=%u request=%llu link=%pK session=%pK",
+ __entry->type, __entry->req_id, __entry->link,
+ __entry->session
+ )
+);
+
+TRACE_EVENT(cam_req_mgr_connect_device,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_device_info *info),
+ TP_ARGS(link, info),
+ TP_STRUCT__entry(
+ __string(name, info->name)
+ __field(uint32_t, id)
+ __field(uint32_t, delay)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __assign_str(name, info->name);
+ __entry->id = info->dev_id;
+ __entry->delay = info->p_delay;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "ReqMgr Connect name=%s id=%u pd=%d link=%pK session=%pK",
+ __get_str(name), __entry->id, __entry->delay,
+ __entry->link, __entry->session
+ )
+);
+
+TRACE_EVENT(cam_req_mgr_apply_request,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_apply_request *req,
+ struct cam_req_mgr_connected_device *dev),
+ TP_ARGS(link, req, dev),
+ TP_STRUCT__entry(
+ __string(name, dev->dev_info.name)
+ __field(uint32_t, dev_id)
+ __field(uint64_t, req_id)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev->dev_info.name);
+ __entry->dev_id = dev->dev_info.dev_id;
+ __entry->req_id = req->request_id;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "ReqMgr ApplyRequest devname=%s devid=%u request=%lld link=%pK session=%pK",
+ __get_str(name), __entry->dev_id, __entry->req_id,
+ __entry->link, __entry->session
+ )
+);
+
+TRACE_EVENT(cam_req_mgr_add_req,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ int idx, struct cam_req_mgr_add_request *add_req,
+ struct cam_req_mgr_req_tbl *tbl,
+ struct cam_req_mgr_connected_device *dev),
+ TP_ARGS(link, idx, add_req, tbl, dev),
+ TP_STRUCT__entry(
+ __string(name, dev->dev_info.name)
+ __field(uint32_t, dev_id)
+ __field(uint64_t, req_id)
+ __field(uint32_t, slot_id)
+ __field(uint32_t, delay)
+ __field(uint32_t, readymap)
+ __field(uint32_t, devicemap)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev->dev_info.name);
+ __entry->dev_id = dev->dev_info.dev_id;
+ __entry->req_id = add_req->req_id;
+ __entry->slot_id = idx;
+ __entry->delay = tbl->pd;
+ __entry->readymap = tbl->slot[idx].req_ready_map;
+ __entry->devicemap = tbl->dev_mask;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "ReqMgr AddRequest devname=%s devid=%d request=%lld slot=%d pd=%d readymap=%x devicemap=%d link=%pK session=%pK",
+ __get_str(name), __entry->dev_id, __entry->req_id,
+ __entry->slot_id, __entry->delay, __entry->readymap,
+ __entry->devicemap, __entry->link, __entry->session
+ )
+);
+
+TRACE_EVENT(cam_submit_to_hw,
+ TP_PROTO(const char *entity, uint64_t req_id),
+ TP_ARGS(entity, req_id),
+ TP_STRUCT__entry(
+ __string(entity, entity)
+ __field(uint64_t, req_id)
+ ),
+ TP_fast_assign(
+ __assign_str(entity, entity);
+ __entry->req_id = req_id;
+ ),
+ TP_printk(
+ "%8s: submit request=%llu",
+ __get_str(entity), __entry->req_id
+ )
+);
+
+TRACE_EVENT(cam_irq_activated,
+ TP_PROTO(const char *entity, uint32_t irq_type),
+ TP_ARGS(entity, irq_type),
+ TP_STRUCT__entry(
+ __string(entity, entity)
+ __field(uint32_t, irq_type)
+ ),
+ TP_fast_assign(
+ __assign_str(entity, entity);
+ __entry->irq_type = irq_type;
+ ),
+ TP_printk(
+ "%8s: got irq type=%d",
+ __get_str(entity), __entry->irq_type
+ )
+);
+
+TRACE_EVENT(cam_irq_handled,
+ TP_PROTO(const char *entity, uint32_t irq_type),
+ TP_ARGS(entity, irq_type),
+ TP_STRUCT__entry(
+ __string(entity, entity)
+ __field(uint32_t, irq_type)
+ ),
+ TP_fast_assign(
+ __assign_str(entity, entity);
+ __entry->irq_type = irq_type;
+ ),
+ TP_printk(
+ "%8s: handled irq type=%d",
+ __get_str(entity), __entry->irq_type
+ )
+);
+
+TRACE_EVENT(cam_cdm_cb,
+ TP_PROTO(const char *entity, uint32_t status),
+ TP_ARGS(entity, status),
+ TP_STRUCT__entry(
+ __string(entity, entity)
+ __field(uint32_t, status)
+ ),
+ TP_fast_assign(
+ __assign_str(entity, entity);
+ __entry->status = status;
+ ),
+ TP_printk(
+ "%8s: cdm cb status=%d",
+ __get_str(entity), __entry->status
+ )
+);
+
+#endif /* _CAM_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 7c01a185d2cd..d8478fd5a903 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -327,7 +327,7 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
{
struct hfi_queue_header *queue;
u32 packet_size_in_words, new_write_idx;
- u32 empty_space, read_idx;
+ u32 empty_space, read_idx, write_idx;
u32 *write_ptr;
if (!qinfo || !packet) {
@@ -350,16 +350,18 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
}
packet_size_in_words = (*(u32 *)packet) >> 2;
- if (!packet_size_in_words) {
- dprintk(VIDC_ERR, "Zero packet size\n");
+ if (!packet_size_in_words || packet_size_in_words >
+ qinfo->q_array.mem_size>>2) {
+ dprintk(VIDC_ERR, "Invalid packet size\n");
return -ENODATA;
}
read_idx = queue->qhdr_read_idx;
+ write_idx = queue->qhdr_write_idx;
- empty_space = (queue->qhdr_write_idx >= read_idx) ?
- (queue->qhdr_q_size - (queue->qhdr_write_idx - read_idx)) :
- (read_idx - queue->qhdr_write_idx);
+ empty_space = (write_idx >= read_idx) ?
+ ((qinfo->q_array.mem_size>>2) - (write_idx - read_idx)) :
+ (read_idx - write_idx);
if (empty_space <= packet_size_in_words) {
queue->qhdr_tx_req = 1;
dprintk(VIDC_ERR, "Insufficient size (%d) to write (%d)\n",
@@ -369,13 +371,20 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
queue->qhdr_tx_req = 0;
- new_write_idx = (queue->qhdr_write_idx + packet_size_in_words);
+ new_write_idx = write_idx + packet_size_in_words;
write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
- (queue->qhdr_write_idx << 2));
- if (new_write_idx < queue->qhdr_q_size) {
+ (write_idx << 2));
+ if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+ write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+ qinfo->q_array.mem_size)) {
+ dprintk(VIDC_ERR, "Invalid write index");
+ return -ENODATA;
+ }
+
+ if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
memcpy(write_ptr, packet, packet_size_in_words << 2);
} else {
- new_write_idx -= queue->qhdr_q_size;
+ new_write_idx -= qinfo->q_array.mem_size >> 2;
memcpy(write_ptr, packet, (packet_size_in_words -
new_write_idx) << 2);
memcpy((void *)qinfo->q_array.align_virtual_addr,
@@ -471,7 +480,8 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
u32 packet_size_in_words, new_read_idx;
u32 *read_ptr;
u32 receive_request = 0;
- int rc = 0;
+ u32 read_idx, write_idx;
+ int rc = 0;
if (!qinfo || !packet || !pb_tx_req_is_set) {
dprintk(VIDC_ERR, "Invalid Params\n");
@@ -504,7 +514,10 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
receive_request = 1;
- if (queue->qhdr_read_idx == queue->qhdr_write_idx) {
+ read_idx = queue->qhdr_read_idx;
+ write_idx = queue->qhdr_write_idx;
+
+ if (read_idx == write_idx) {
queue->qhdr_rx_req = receive_request;
/*
* mb() to ensure qhdr is updated in main memory
@@ -521,21 +534,28 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
}
read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
- (queue->qhdr_read_idx << 2));
+ (read_idx << 2));
+ if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+ read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+ qinfo->q_array.mem_size - sizeof(*read_ptr))) {
+ dprintk(VIDC_ERR, "Invalid read index\n");
+ return -ENODATA;
+ }
+
packet_size_in_words = (*read_ptr) >> 2;
if (!packet_size_in_words) {
dprintk(VIDC_ERR, "Zero packet size\n");
return -ENODATA;
}
- new_read_idx = queue->qhdr_read_idx + packet_size_in_words;
- if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE)
- && queue->qhdr_read_idx <= queue->qhdr_q_size) {
- if (new_read_idx < queue->qhdr_q_size) {
+ new_read_idx = read_idx + packet_size_in_words;
+ if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) &&
+ read_idx <= (qinfo->q_array.mem_size >> 2)) {
+ if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
memcpy(packet, read_ptr,
packet_size_in_words << 2);
} else {
- new_read_idx -= queue->qhdr_q_size;
+ new_read_idx -= (qinfo->q_array.mem_size >> 2);
memcpy(packet, read_ptr,
(packet_size_in_words - new_read_idx) << 2);
memcpy(packet + ((packet_size_in_words -
@@ -546,18 +566,18 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
} else {
dprintk(VIDC_WARN,
"BAD packet received, read_idx: %#x, pkt_size: %d\n",
- queue->qhdr_read_idx, packet_size_in_words << 2);
+ read_idx, packet_size_in_words << 2);
dprintk(VIDC_WARN, "Dropping this packet\n");
- new_read_idx = queue->qhdr_write_idx;
+ new_read_idx = write_idx;
rc = -ENODATA;
}
- queue->qhdr_read_idx = new_read_idx;
-
- if (queue->qhdr_read_idx != queue->qhdr_write_idx)
+ if (new_read_idx != write_idx)
queue->qhdr_rx_req = 0;
else
queue->qhdr_rx_req = receive_request;
+
+ queue->qhdr_read_idx = new_read_idx;
/*
* mb() to ensure qhdr is updated in main memory
* so that venus reads the updated header values
diff --git a/drivers/media/platform/msm/vidc_3x/venus_hfi.c b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
index 1bd6ae8959cf..2db49fa558f5 100644
--- a/drivers/media/platform/msm/vidc_3x/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, 2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, 2018-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -330,7 +330,7 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
{
struct hfi_queue_header *queue;
u32 packet_size_in_words, new_write_idx;
- u32 empty_space, read_idx;
+ u32 empty_space, read_idx, write_idx;
u32 *write_ptr;
if (!qinfo || !packet) {
@@ -353,16 +353,18 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
}
packet_size_in_words = (*(u32 *)packet) >> 2;
- if (!packet_size_in_words) {
- dprintk(VIDC_ERR, "Zero packet size\n");
+ if (!packet_size_in_words || packet_size_in_words >
+ qinfo->q_array.mem_size>>2) {
+ dprintk(VIDC_ERR, "Invalid packet size\n");
return -ENODATA;
}
read_idx = queue->qhdr_read_idx;
+ write_idx = queue->qhdr_write_idx;
- empty_space = (queue->qhdr_write_idx >= read_idx) ?
- (queue->qhdr_q_size - (queue->qhdr_write_idx - read_idx)) :
- (read_idx - queue->qhdr_write_idx);
+ empty_space = (write_idx >= read_idx) ?
+ ((qinfo->q_array.mem_size>>2) - (write_idx - read_idx)) :
+ (read_idx - write_idx);
if (empty_space <= packet_size_in_words) {
queue->qhdr_tx_req = 1;
dprintk(VIDC_ERR, "Insufficient size (%d) to write (%d)\n",
@@ -372,13 +374,20 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
queue->qhdr_tx_req = 0;
- new_write_idx = (queue->qhdr_write_idx + packet_size_in_words);
+ new_write_idx = write_idx + packet_size_in_words;
write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
- (queue->qhdr_write_idx << 2));
- if (new_write_idx < queue->qhdr_q_size) {
+ (write_idx << 2));
+ if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+ write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+ qinfo->q_array.mem_size)) {
+ dprintk(VIDC_ERR, "Invalid write index");
+ return -ENODATA;
+ }
+
+ if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
memcpy(write_ptr, packet, packet_size_in_words << 2);
} else {
- new_write_idx -= queue->qhdr_q_size;
+ new_write_idx -= qinfo->q_array.mem_size >> 2;
memcpy(write_ptr, packet, (packet_size_in_words -
new_write_idx) << 2);
memcpy((void *)qinfo->q_array.align_virtual_addr,
@@ -472,7 +481,8 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
u32 packet_size_in_words, new_read_idx;
u32 *read_ptr;
u32 receive_request = 0;
- int rc = 0;
+ u32 read_idx, write_idx;
+ int rc = 0;
if (!qinfo || !packet || !pb_tx_req_is_set) {
dprintk(VIDC_ERR, "Invalid Params\n");
@@ -504,7 +514,10 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
receive_request = 1;
- if (queue->qhdr_read_idx == queue->qhdr_write_idx) {
+ read_idx = queue->qhdr_read_idx;
+ write_idx = queue->qhdr_write_idx;
+
+ if (read_idx == write_idx) {
queue->qhdr_rx_req = receive_request;
*pb_tx_req_is_set = 0;
dprintk(VIDC_DBG,
@@ -516,21 +529,28 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
}
read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
- (queue->qhdr_read_idx << 2));
+ (read_idx << 2));
+ if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+ read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+ qinfo->q_array.mem_size - sizeof(*read_ptr))) {
+ dprintk(VIDC_ERR, "Invalid read index\n");
+ return -ENODATA;
+ }
+
packet_size_in_words = (*read_ptr) >> 2;
if (!packet_size_in_words) {
dprintk(VIDC_ERR, "Zero packet size\n");
return -ENODATA;
}
- new_read_idx = queue->qhdr_read_idx + packet_size_in_words;
- if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE)
- && queue->qhdr_read_idx <= queue->qhdr_q_size) {
- if (new_read_idx < queue->qhdr_q_size) {
+ new_read_idx = read_idx + packet_size_in_words;
+ if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) &&
+ read_idx <= (qinfo->q_array.mem_size >> 2)) {
+ if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
memcpy(packet, read_ptr,
packet_size_in_words << 2);
} else {
- new_read_idx -= queue->qhdr_q_size;
+ new_read_idx -= (qinfo->q_array.mem_size >> 2);
memcpy(packet, read_ptr,
(packet_size_in_words - new_read_idx) << 2);
memcpy(packet + ((packet_size_in_words -
@@ -541,19 +561,18 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
} else {
dprintk(VIDC_WARN,
"BAD packet received, read_idx: %#x, pkt_size: %d\n",
- queue->qhdr_read_idx, packet_size_in_words << 2);
+ read_idx, packet_size_in_words << 2);
dprintk(VIDC_WARN, "Dropping this packet\n");
- new_read_idx = queue->qhdr_write_idx;
+ new_read_idx = write_idx;
rc = -ENODATA;
}
- queue->qhdr_read_idx = new_read_idx;
-
- if (queue->qhdr_read_idx != queue->qhdr_write_idx)
+ if (new_read_idx != write_idx)
queue->qhdr_rx_req = 0;
else
queue->qhdr_rx_req = receive_request;
+ queue->qhdr_read_idx = new_read_idx;
*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
if (msm_vidc_debug & VIDC_PKT) {
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index c0c698317d5a..07dbf1740353 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1,7 +1,7 @@
/*
* QTI Secure Execution Environment Communicator (QSEECOM) driver
*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -143,6 +143,8 @@ static dev_t qseecom_device_no;
static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);
+static DEFINE_MUTEX(listener_access_lock);
+
struct sglist_info {
uint32_t indexAndFlags;
@@ -182,15 +184,21 @@ struct qseecom_registered_listener_list {
size_t sb_length;
struct ion_handle *ihandle; /* Retrieve phy addr */
wait_queue_head_t rcv_req_wq;
- /* rcv_req_flag: -1: not ready; 0: ready and empty; 1: received req */
+ /* rcv_req_flag: 0: ready and empty; 1: received req */
int rcv_req_flag;
int send_resp_flag;
bool listener_in_use;
/* wq for thread blocked on this listener*/
wait_queue_head_t listener_block_app_wq;
- struct sglist_info sglistinfo_ptr[MAX_ION_FD];
- uint32_t sglist_cnt;
- int abort;
+ struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+ uint32_t sglist_cnt;
+ int abort;
+ bool unregister_pending;
+};
+
+struct qseecom_unregister_pending_list {
+ struct list_head list;
+ struct qseecom_dev_handle *data;
};
struct qseecom_registered_app_list {
@@ -238,7 +246,6 @@ struct qseecom_clk {
struct qseecom_control {
struct ion_client *ion_clnt; /* Ion client */
struct list_head registered_listener_list_head;
- spinlock_t registered_listener_list_lock;
struct list_head registered_app_list_head;
spinlock_t registered_app_list_lock;
@@ -284,6 +291,9 @@ struct qseecom_control {
atomic_t qseecom_state;
int is_apps_region_protected;
bool smcinvoke_support;
+
+ struct list_head unregister_lsnr_pending_list_head;
+ wait_queue_head_t register_lsnr_pending_wq;
};
struct qseecom_sec_buf_fd_info {
@@ -312,6 +322,8 @@ struct qseecom_client_handle {
struct qseecom_listener_handle {
u32 id;
+ bool unregister_pending;
+ bool release_called;
};
static struct qseecom_control qseecom;
@@ -585,13 +597,10 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
}
qseecom.smcinvoke_support = true;
smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
- __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
ret = scm_call2(smc_id, &desc);
- if (ret) {
+ if (ret && ret != -EBUSY) {
qseecom.smcinvoke_support = false;
smc_id = TZ_OS_REGISTER_LISTENER_ID;
- __qseecom_reentrancy_check_if_no_app_blocked(
- smc_id);
ret = scm_call2(smc_id, &desc);
}
break;
@@ -604,7 +613,6 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
desc.args[0] = req->listener_id;
- __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
ret = scm_call2(smc_id, &desc);
break;
}
@@ -1065,42 +1073,18 @@ static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
}
-static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
- struct qseecom_register_listener_req *svc)
-{
- struct qseecom_registered_listener_list *ptr;
- int unique = 1;
- unsigned long flags;
-
- spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
- list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
- if (ptr->svc.listener_id == svc->listener_id) {
- pr_err("Service id: %u is already registered\n",
- ptr->svc.listener_id);
- unique = 0;
- break;
- }
- }
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
- return unique;
-}
-
static struct qseecom_registered_listener_list *__qseecom_find_svc(
int32_t listener_id)
{
struct qseecom_registered_listener_list *entry = NULL;
- unsigned long flags;
- spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
list_for_each_entry(entry,
&qseecom.registered_listener_list_head, list) {
if (entry->svc.listener_id == listener_id)
break;
}
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
-
if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
- pr_err("Service id: %u is not found\n", listener_id);
+ pr_debug("Service id: %u is not found\n", listener_id);
return NULL;
}
@@ -1179,9 +1163,9 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data,
void __user *argp)
{
int ret = 0;
- unsigned long flags;
struct qseecom_register_listener_req rcvd_lstnr;
struct qseecom_registered_listener_list *new_entry;
+ struct qseecom_registered_listener_list *ptr_svc;
ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
if (ret) {
@@ -1193,18 +1177,37 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data,
return -EFAULT;
data->listener.id = rcvd_lstnr.listener_id;
- if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
- pr_err("Service %d is not unique and failed to register\n",
+
+ ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
+ if (ptr_svc) {
+ if (ptr_svc->unregister_pending == false) {
+ pr_err("Service %d is not unique\n",
rcvd_lstnr.listener_id);
data->released = true;
return -EBUSY;
+ } else {
+ /*wait until listener is unregistered*/
+ pr_debug("register %d has to wait\n",
+ rcvd_lstnr.listener_id);
+ mutex_unlock(&listener_access_lock);
+ ret = wait_event_freezable(
+ qseecom.register_lsnr_pending_wq,
+ list_empty(
+ &qseecom.unregister_lsnr_pending_list_head));
+ if (ret) {
+ pr_err("interrupted register_pending_wq %d\n",
+ rcvd_lstnr.listener_id);
+ mutex_lock(&listener_access_lock);
+ return -ERESTARTSYS;
+ }
+ mutex_lock(&listener_access_lock);
+ }
}
-
new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
- new_entry->rcv_req_flag = -1;
+ new_entry->rcv_req_flag = 0;
new_entry->svc.listener_id = rcvd_lstnr.listener_id;
new_entry->sb_length = rcvd_lstnr.sb_size;
@@ -1220,45 +1223,20 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data,
init_waitqueue_head(&new_entry->listener_block_app_wq);
new_entry->send_resp_flag = 0;
new_entry->listener_in_use = false;
- spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
return ret;
}
-static void __qseecom_listener_abort_all(int abort)
-{
- struct qseecom_registered_listener_list *entry = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
- list_for_each_entry(entry,
- &qseecom.registered_listener_list_head, list) {
- pr_debug("set abort %d for listener %d\n",
- abort, entry->svc.listener_id);
- entry->abort = abort;
- }
- if (abort)
- wake_up_interruptible_all(&qseecom.send_resp_wq);
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
-}
-
-static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
+ struct qseecom_registered_listener_list *ptr_svc)
{
int ret = 0;
struct qseecom_register_listener_ireq req;
- struct qseecom_registered_listener_list *ptr_svc = NULL;
struct qseecom_command_scm_resp resp;
struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
- ptr_svc = __qseecom_find_svc(data->listener.id);
- if (!ptr_svc) {
- pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
- return -ENODATA;
- }
-
req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
req.listener_id = data->listener.id;
resp.result = QSEOS_RESULT_INCOMPLETE;
@@ -1268,6 +1246,8 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
if (ret) {
pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
ret, data->listener.id);
+ if (ret == -EBUSY)
+ return ret;
goto exit;
}
@@ -1279,7 +1259,6 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
}
data->abort = 1;
- ptr_svc->abort = 1;
wake_up_all(&ptr_svc->rcv_req_wq);
while (atomic_read(&data->ioctl_count) > 1) {
@@ -1306,6 +1285,77 @@ exit:
return ret;
}
+static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+{
+ struct qseecom_registered_listener_list *ptr_svc = NULL;
+ struct qseecom_unregister_pending_list *entry = NULL;
+
+ ptr_svc = __qseecom_find_svc(data->listener.id);
+ if (!ptr_svc) {
+ pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
+ return -ENODATA;
+ }
+ /* stop CA thread waiting for listener response */
+ ptr_svc->abort = 1;
+ wake_up_interruptible_all(&qseecom.send_resp_wq);
+
+ /* return directly if pending*/
+ if (ptr_svc->unregister_pending)
+ return 0;
+
+ /*add unregistration into pending list*/
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ entry->data = data;
+ list_add_tail(&entry->list,
+ &qseecom.unregister_lsnr_pending_list_head);
+ ptr_svc->unregister_pending = true;
+ pr_debug("unregister %d pending\n", data->listener.id);
+ return 0;
+}
+
+static void __qseecom_processing_pending_lsnr_unregister(void)
+{
+ struct qseecom_unregister_pending_list *entry = NULL;
+ struct qseecom_registered_listener_list *ptr_svc = NULL;
+ struct list_head *pos;
+ int ret = 0;
+
+ mutex_lock(&listener_access_lock);
+ while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
+ pos = qseecom.unregister_lsnr_pending_list_head.next;
+ entry = list_entry(pos,
+ struct qseecom_unregister_pending_list, list);
+ if (entry && entry->data) {
+ pr_debug("process pending unregister %d\n",
+ entry->data->listener.id);
+ /* don't process if qseecom_release is not called*/
+ if (!entry->data->listener.release_called)
+ break;
+ ptr_svc = __qseecom_find_svc(
+ entry->data->listener.id);
+ if (ptr_svc) {
+ ret = __qseecom_unregister_listener(
+ entry->data, ptr_svc);
+ if (ret == -EBUSY) {
+ pr_debug("unregister %d pending again\n",
+ entry->data->listener.id);
+ mutex_unlock(&listener_access_lock);
+ return;
+ }
+ } else
+ pr_err("invalid listener %d\n",
+ entry->data->listener.id);
+ kzfree(entry->data);
+ }
+ list_del(pos);
+ kzfree(entry);
+ }
+ mutex_unlock(&listener_access_lock);
+ wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
+}
+
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
int ret = 0;
@@ -1640,19 +1690,12 @@ static void __qseecom_clean_listener_sglistinfo(
}
}
-static int __is_listener_rcv_wq_not_ready(
- struct qseecom_registered_listener_list *ptr_svc)
-{
- return ptr_svc->rcv_req_flag == -1;
-}
-
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
struct qseecom_command_scm_resp *resp)
{
int ret = 0;
int rc = 0;
uint32_t lstnr;
- unsigned long flags;
struct qseecom_client_listener_data_irsp send_data_rsp = {0};
struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
= {0};
@@ -1663,30 +1706,23 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
void *cmd_buf = NULL;
size_t cmd_len;
struct sglist_info *table = NULL;
- bool not_ready = false;
+ qseecom.app_block_ref_cnt++;
while (resp->result == QSEOS_RESULT_INCOMPLETE) {
lstnr = resp->data;
/*
* Wake up blocking lsitener service with the lstnr id
*/
- spin_lock_irqsave(&qseecom.registered_listener_list_lock,
- flags);
+ mutex_lock(&listener_access_lock);
list_for_each_entry(ptr_svc,
&qseecom.registered_listener_list_head, list) {
if (ptr_svc->svc.listener_id == lstnr) {
- if (__is_listener_rcv_wq_not_ready(ptr_svc)) {
- not_ready = true;
- break;
- }
ptr_svc->listener_in_use = true;
ptr_svc->rcv_req_flag = 1;
wake_up_interruptible(&ptr_svc->rcv_req_wq);
break;
}
}
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
- flags);
if (ptr_svc == NULL) {
pr_err("Listener Svc %d does not exist\n", lstnr);
@@ -1712,22 +1748,13 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
}
if (ptr_svc->abort == 1) {
- pr_err("Service %d abort %d\n",
+ pr_debug("Service %d abort %d\n",
lstnr, ptr_svc->abort);
rc = -ENODEV;
status = QSEOS_RESULT_FAILURE;
goto err_resp;
}
- if (not_ready) {
- pr_err("Service %d is not ready to receive request\n",
- lstnr);
- rc = -ENOENT;
- status = QSEOS_RESULT_FAILURE;
- goto err_resp;
-
- }
-
pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
/* initialize the new signal mask with all signals*/
@@ -1735,6 +1762,7 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
/* block all signals */
sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+ mutex_unlock(&listener_access_lock);
do {
/*
* When reentrancy is not supported, check global
@@ -1755,7 +1783,7 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
break;
}
} while (1);
-
+ mutex_lock(&listener_access_lock);
/* restore signal mask */
sigprocmask(SIG_SETMASK, &old_sigset, NULL);
if (data->abort || ptr_svc->abort) {
@@ -1811,14 +1839,14 @@ err_resp:
ION_IOC_CLEAN_INV_CACHES);
if (ret) {
pr_err("cache operation failed %d\n", ret);
- return ret;
+ goto exit;
}
}
if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
ret = __qseecom_enable_clk(CLK_QSEE);
if (ret)
- return ret;
+ goto exit;
}
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
@@ -1832,7 +1860,7 @@ err_resp:
ret, data->client.app_id);
if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
__qseecom_disable_clk(CLK_QSEE);
- return ret;
+ goto exit;
}
pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
status, resp->result, data->client.app_id, lstnr);
@@ -1841,11 +1869,15 @@ err_resp:
pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
resp->result, data->client.app_id, lstnr);
ret = -EINVAL;
+ goto exit;
}
+exit:
+ mutex_unlock(&listener_access_lock);
if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
__qseecom_disable_clk(CLK_QSEE);
}
+ qseecom.app_block_ref_cnt--;
if (rc)
return rc;
@@ -1898,10 +1930,12 @@ static int __qseecom_process_reentrancy_blocked_on_listener(
do {
session_id = resp->resp_type;
+ mutex_lock(&listener_access_lock);
list_ptr = __qseecom_find_svc(resp->data);
if (!list_ptr) {
pr_err("Invalid listener ID %d\n", resp->data);
ret = -ENODATA;
+ mutex_unlock(&listener_access_lock);
goto exit;
}
ptr_app->blocked_on_listener_id = resp->data;
@@ -1917,11 +1951,13 @@ static int __qseecom_process_reentrancy_blocked_on_listener(
do {
qseecom.app_block_ref_cnt++;
ptr_app->app_blocked = true;
+ mutex_unlock(&listener_access_lock);
mutex_unlock(&app_access_lock);
wait_event_freezable(
list_ptr->listener_block_app_wq,
!list_ptr->listener_in_use);
mutex_lock(&app_access_lock);
+ mutex_lock(&listener_access_lock);
ptr_app->app_blocked = false;
qseecom.app_block_ref_cnt--;
} while (list_ptr->listener_in_use);
@@ -1954,9 +1990,11 @@ static int __qseecom_process_reentrancy_blocked_on_listener(
if (ret) {
pr_err("unblock app %d or session %d fail\n",
data->client.app_id, session_id);
+ mutex_unlock(&listener_access_lock);
goto exit;
}
}
+ mutex_unlock(&listener_access_lock);
resp->result = continue_resp.result;
resp->resp_type = continue_resp.resp_type;
resp->data = continue_resp.data;
@@ -1978,7 +2016,6 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
int ret = 0;
int rc = 0;
uint32_t lstnr;
- unsigned long flags;
struct qseecom_client_listener_data_irsp send_data_rsp = {0};
struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
= {0};
@@ -1989,30 +2026,22 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
void *cmd_buf = NULL;
size_t cmd_len;
struct sglist_info *table = NULL;
- bool not_ready = false;
while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
lstnr = resp->data;
/*
* Wake up blocking lsitener service with the lstnr id
*/
- spin_lock_irqsave(&qseecom.registered_listener_list_lock,
- flags);
+ mutex_lock(&listener_access_lock);
list_for_each_entry(ptr_svc,
&qseecom.registered_listener_list_head, list) {
if (ptr_svc->svc.listener_id == lstnr) {
- if (__is_listener_rcv_wq_not_ready(ptr_svc)) {
- not_ready = true;
- break;
- }
ptr_svc->listener_in_use = true;
ptr_svc->rcv_req_flag = 1;
wake_up_interruptible(&ptr_svc->rcv_req_wq);
break;
}
}
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
- flags);
if (ptr_svc == NULL) {
pr_err("Listener Svc %d does not exist\n", lstnr);
@@ -2038,22 +2067,13 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
}
if (ptr_svc->abort == 1) {
- pr_err("Service %d abort %d\n",
+ pr_debug("Service %d abort %d\n",
lstnr, ptr_svc->abort);
rc = -ENODEV;
status = QSEOS_RESULT_FAILURE;
goto err_resp;
}
- if (not_ready) {
- pr_err("Service %d is not ready to receive request\n",
- lstnr);
- rc = -ENOENT;
- status = QSEOS_RESULT_FAILURE;
- goto err_resp;
-
- }
-
pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
/* initialize the new signal mask with all signals*/
@@ -2063,6 +2083,7 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
/* unlock mutex btw waking listener and sleep-wait */
+ mutex_unlock(&listener_access_lock);
mutex_unlock(&app_access_lock);
do {
if (!wait_event_freezable(qseecom.send_resp_wq,
@@ -2073,6 +2094,7 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
} while (1);
/* lock mutex again after resp sent */
mutex_lock(&app_access_lock);
+ mutex_lock(&listener_access_lock);
ptr_svc->send_resp_flag = 0;
qseecom.send_resp_flag = 0;
@@ -2134,7 +2156,7 @@ err_resp:
if (lstnr == RPMB_SERVICE) {
ret = __qseecom_enable_clk(CLK_QSEE);
if (ret)
- return ret;
+ goto exit;
}
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
@@ -2161,8 +2183,10 @@ err_resp:
ret = -EINVAL;
goto exit;
}
+ mutex_unlock(&listener_access_lock);
ret = __qseecom_process_reentrancy_blocked_on_listener(
resp, NULL, data);
+ mutex_lock(&listener_access_lock);
if (ret) {
pr_err("failed to process App(%d) %s blocked on listener %d\n",
data->client.app_id,
@@ -2179,6 +2203,7 @@ err_resp:
goto exit;
}
exit:
+ mutex_unlock(&listener_access_lock);
if (lstnr == RPMB_SERVICE)
__qseecom_disable_clk(CLK_QSEE);
@@ -3267,6 +3292,7 @@ exit:
pr_err("cache operation failed %d\n", ret2);
return ret2;
}
+ __qseecom_processing_pending_lsnr_unregister();
return ret;
}
@@ -3862,7 +3888,7 @@ static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
int ret;
ret = (svc->rcv_req_flag == 1);
- return ret || data->abort || svc->abort;
+ return ret || data->abort;
}
static int qseecom_receive_req(struct qseecom_dev_handle *data)
@@ -3870,14 +3896,14 @@ static int qseecom_receive_req(struct qseecom_dev_handle *data)
int ret = 0;
struct qseecom_registered_listener_list *this_lstnr;
+ mutex_lock(&listener_access_lock);
this_lstnr = __qseecom_find_svc(data->listener.id);
if (!this_lstnr) {
pr_err("Invalid listener ID\n");
+ mutex_unlock(&listener_access_lock);
return -ENODATA;
}
-
- if (this_lstnr->rcv_req_flag == -1)
- this_lstnr->rcv_req_flag = 0;
+ mutex_unlock(&listener_access_lock);
while (1) {
if (wait_event_freezable(this_lstnr->rcv_req_wq,
@@ -3886,16 +3912,17 @@ static int qseecom_receive_req(struct qseecom_dev_handle *data)
pr_warn("Interrupted: exiting Listener Service = %d\n",
(uint32_t)data->listener.id);
/* woken up for different reason */
- this_lstnr->rcv_req_flag = -1;
return -ERESTARTSYS;
}
- if (data->abort || this_lstnr->abort) {
+ if (data->abort) {
pr_err("Aborting Listener Service = %d\n",
(uint32_t)data->listener.id);
return -ENODEV;
}
+ mutex_lock(&listener_access_lock);
this_lstnr->rcv_req_flag = 0;
+ mutex_unlock(&listener_access_lock);
break;
}
return ret;
@@ -4482,6 +4509,8 @@ int qseecom_start_app(struct qseecom_handle **handle,
uint32_t fw_size, app_arch;
uint32_t app_id = 0;
+ __qseecom_processing_pending_lsnr_unregister();
+
if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
pr_err("Not allowed to be called in %d state\n",
atomic_read(&qseecom.qseecom_state));
@@ -4655,6 +4684,8 @@ int qseecom_shutdown_app(struct qseecom_handle **handle)
unsigned long flags = 0;
bool found_handle = false;
+ __qseecom_processing_pending_lsnr_unregister();
+
if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
pr_err("Not allowed to be called in %d state\n",
atomic_read(&qseecom.qseecom_state));
@@ -4703,6 +4734,8 @@ int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
struct qseecom_dev_handle *data;
bool perf_enabled = false;
+ __qseecom_processing_pending_lsnr_unregister();
+
if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
pr_err("Not allowed to be called in %d state\n",
atomic_read(&qseecom.qseecom_state));
@@ -7012,6 +7045,11 @@ static inline long qseecom_ioctl(struct file *file,
pr_err("Aborting qseecom driver\n");
return -ENODEV;
}
+ if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
+ cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
+ cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
+ cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
+ __qseecom_processing_pending_lsnr_unregister();
switch (cmd) {
case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
@@ -7022,13 +7060,13 @@ static inline long qseecom_ioctl(struct file *file,
break;
}
pr_debug("ioctl register_listener_req()\n");
- mutex_lock(&app_access_lock);
+ mutex_lock(&listener_access_lock);
atomic_inc(&data->ioctl_count);
data->type = QSEECOM_LISTENER_SERVICE;
ret = qseecom_register_listener(data, argp);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
- mutex_unlock(&app_access_lock);
+ mutex_unlock(&listener_access_lock);
if (ret)
pr_err("failed qseecom_register_listener: %d\n", ret);
break;
@@ -7054,14 +7092,12 @@ static inline long qseecom_ioctl(struct file *file,
break;
}
pr_debug("ioctl unregister_listener_req()\n");
- __qseecom_listener_abort_all(1);
- mutex_lock(&app_access_lock);
+ mutex_lock(&listener_access_lock);
atomic_inc(&data->ioctl_count);
ret = qseecom_unregister_listener(data);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
- mutex_unlock(&app_access_lock);
- __qseecom_listener_abort_all(0);
+ mutex_unlock(&listener_access_lock);
if (ret)
pr_err("failed qseecom_unregister_listener: %d\n", ret);
break;
@@ -7216,6 +7252,7 @@ static inline long qseecom_ioctl(struct file *file,
ret = -EINVAL;
break;
}
+ mutex_lock(&listener_access_lock);
atomic_inc(&data->ioctl_count);
if (!qseecom.qsee_reentrancy_support)
ret = qseecom_send_resp();
@@ -7223,6 +7260,7 @@ static inline long qseecom_ioctl(struct file *file,
ret = qseecom_reentrancy_send_resp(data);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
+ mutex_unlock(&listener_access_lock);
if (ret)
pr_err("failed qseecom_send_resp: %d\n", ret);
break;
@@ -7558,6 +7596,7 @@ static inline long qseecom_ioctl(struct file *file,
ret = -EINVAL;
break;
}
+ mutex_lock(&listener_access_lock);
atomic_inc(&data->ioctl_count);
if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
ret = qseecom_send_modfd_resp(data, argp);
@@ -7565,6 +7604,7 @@ static inline long qseecom_ioctl(struct file *file,
ret = qseecom_send_modfd_resp_64(data, argp);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
+ mutex_unlock(&listener_access_lock);
if (ret)
pr_err("failed qseecom_send_mod_resp: %d\n", ret);
__qseecom_clean_data_sglistinfo(data);
@@ -7722,18 +7762,19 @@ static int qseecom_release(struct inode *inode, struct file *file)
{
struct qseecom_dev_handle *data = file->private_data;
int ret = 0;
+ bool free_private_data = true;
if (data->released == false) {
pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
data->type, data->mode, data);
switch (data->type) {
case QSEECOM_LISTENER_SERVICE:
- pr_warn("release lsnr svc %d\n", data->listener.id);
- __qseecom_listener_abort_all(1);
- mutex_lock(&app_access_lock);
+ pr_debug("release lsnr svc %d\n", data->listener.id);
+ free_private_data = false;
+ mutex_lock(&listener_access_lock);
ret = qseecom_unregister_listener(data);
- mutex_unlock(&app_access_lock);
- __qseecom_listener_abort_all(0);
+ data->listener.release_called = true;
+ mutex_unlock(&listener_access_lock);
break;
case QSEECOM_CLIENT_APP:
mutex_lock(&app_access_lock);
@@ -7772,8 +7813,9 @@ static int qseecom_release(struct inode *inode, struct file *file)
if (data->perf_enabled == true)
qsee_disable_clock_vote(data, CLK_DFAB);
}
- kfree(data);
+ if (free_private_data)
+ kfree(data);
return ret;
}
@@ -8600,12 +8642,13 @@ static int qseecom_probe(struct platform_device *pdev)
}
INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
- spin_lock_init(&qseecom.registered_listener_list_lock);
INIT_LIST_HEAD(&qseecom.registered_app_list_head);
spin_lock_init(&qseecom.registered_app_list_lock);
+ INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
spin_lock_init(&qseecom.registered_kclient_list_lock);
init_waitqueue_head(&qseecom.send_resp_wq);
+ init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
qseecom.send_resp_flag = 0;
qseecom.qsee_version = QSEEE_VERSION_00;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 04ac554ccd90..448aea08ec24 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1300,6 +1300,17 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
mmc_get_card(card);
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt(card->host, true);
+ if (err) {
+ pr_err("%s: halt failed while doing %s err (%d)\n",
+ mmc_hostname(card->host),
+ __func__, err);
+ mmc_put_card(card);
+ goto cmd_done;
+ }
+ }
+
for (i = 0; i < num_of_cmds && !ioc_err; i++)
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
@@ -1307,6 +1318,12 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+
mmc_put_card(card);
/* copy to user if data and response */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 6dc29058252f..a84c7a3115da 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1076,6 +1076,7 @@ struct ipa3_nat_ipv6ct_common_mem {
* @index_table_expansion_addr: index expansion table address
* @public_ip_addr: ip address of nat table
* @pdn_mem: pdn config table SW cache memory structure
+ * @is_tmp_mem_allocated: indicate if tmp mem has been allocated
*/
struct ipa3_nat_mem {
struct ipa3_nat_ipv6ct_common_mem dev;
@@ -1083,6 +1084,7 @@ struct ipa3_nat_mem {
char *index_table_expansion_addr;
u32 public_ip_addr;
struct ipa_mem_buffer pdn_mem;
+ bool is_tmp_mem_allocated;
};
/**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 5582c9c66676..a5c5837006d7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -266,14 +266,21 @@ static void ipa3_nat_ipv6ct_destroy_device(
mutex_lock(&dev->lock);
- dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
- dev->tmp_mem->vaddr, dev->tmp_mem->dma_handle);
- kfree(dev->tmp_mem);
+ if (dev->tmp_mem != NULL &&
+ ipa3_ctx->nat_mem.is_tmp_mem_allocated == false) {
+ dev->tmp_mem = NULL;
+ } else if (dev->tmp_mem != NULL &&
+ ipa3_ctx->nat_mem.is_tmp_mem_allocated) {
+ dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
+ dev->tmp_mem->vaddr, dev->tmp_mem->dma_handle);
+ kfree(dev->tmp_mem);
+ dev->tmp_mem = NULL;
+ ipa3_ctx->nat_mem.is_tmp_mem_allocated = false;
+ }
device_destroy(dev->class, dev->dev_num);
unregister_chrdev_region(dev->dev_num, 1);
class_destroy(dev->class);
dev->is_dev_init = false;
-
mutex_unlock(&dev->lock);
IPADBG("return\n");
@@ -296,10 +303,15 @@ int ipa3_nat_ipv6ct_init_devices(void)
/*
* Allocate NAT/IPv6CT temporary memory. The memory is never deleted,
* because provided to HW once NAT or IPv6CT table is deleted.
- * NULL is a legal value
*/
tmp_mem = ipa3_nat_ipv6ct_allocate_tmp_memory();
+ if (tmp_mem == NULL) {
+ IPAERR("unable to allocate tmp_mem\n");
+ return -ENOMEM;
+ }
+ ipa3_ctx->nat_mem.is_tmp_mem_allocated = true;
+
if (ipa3_nat_ipv6ct_init_device(
&ipa3_ctx->nat_mem.dev,
IPA_NAT_DEV_NAME,
@@ -328,10 +340,11 @@ int ipa3_nat_ipv6ct_init_devices(void)
fail_init_ipv6ct_dev:
ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev);
fail_init_nat_dev:
- if (tmp_mem != NULL) {
+ if (tmp_mem != NULL && ipa3_ctx->nat_mem.is_tmp_mem_allocated) {
dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
tmp_mem->vaddr, tmp_mem->dma_handle);
kfree(tmp_mem);
+ ipa3_ctx->nat_mem.is_tmp_mem_allocated = false;
}
return result;
}
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 07715b7bee8e..ec438df89a2a 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -341,6 +341,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(qc_opti_disable),
POWER_SUPPLY_ATTR(fcc_stepper_enable),
POWER_SUPPLY_ATTR(cc_soc),
+ POWER_SUPPLY_ATTR(qg_vbms_mode),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index f3f2c6636000..09c3fc47f013 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -1055,9 +1055,19 @@ static void ttf_work(struct work_struct *work)
struct ttf *ttf = container_of(work,
struct ttf, ttf_work.work);
int rc, ibatt_now, vbatt_now, ttf_now, charge_status;
+ int valid = 0;
ktime_t ktime_now;
mutex_lock(&ttf->lock);
+ rc = ttf->get_ttf_param(ttf->data, TTF_VALID, &valid);
+ if (rc < 0) {
+ pr_err("failed to get ttf_valid rc=%d\n", rc);
+ goto end_work;
+ }
+
+ if (!valid)
+ goto end_work;
+
rc = ttf->get_ttf_param(ttf->data, TTF_CHG_STATUS, &charge_status);
if (rc < 0) {
pr_err("failed to get charge_status rc=%d\n", rc);
@@ -1198,7 +1208,16 @@ int ttf_get_time_to_empty(struct ttf *ttf, int *val)
*/
void ttf_update(struct ttf *ttf, bool input_present)
{
- int delay_ms;
+ int delay_ms, rc, valid = 0;
+
+ rc = ttf->get_ttf_param(ttf->data, TTF_VALID, &valid);
+ if (rc < 0) {
+ pr_err("failed to get ttf_valid rc=%d\n", rc);
+ return;
+ }
+
+ if (!valid)
+ return;
if (ttf->input_present == input_present)
return;
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index 4f0773b542cc..6f5f9f1c9d43 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -64,6 +64,7 @@ struct qg_dt {
bool esr_disable;
bool esr_discharge_enable;
bool qg_ext_sense;
+ bool qg_vbms_mode;
};
struct qg_esr_data {
@@ -128,6 +129,7 @@ struct qpnp_qg {
int esr_nominal;
int soh;
int soc_reporting_ready;
+ int vbms_ibat_ua;
u32 fifo_done_count;
u32 wa_flags;
u32 seq_no;
diff --git a/drivers/power/supply/qcom/qg-reg.h b/drivers/power/supply/qcom/qg-reg.h
index 894e0764301c..69f2e1ed4354 100644
--- a/drivers/power/supply/qcom/qg-reg.h
+++ b/drivers/power/supply/qcom/qg-reg.h
@@ -121,6 +121,7 @@
#define QG_SDAM_MAX_OFFSET 0xA4
/* Below offset is used by PBS */
+#define QG_SDAM_SEQ_OFFSET 0xBB /* 1-byte 0xBB */
#define QG_SDAM_PON_OCV_OFFSET 0xBC /* 2-byte 0xBC-0xBD */
#endif
diff --git a/drivers/power/supply/qcom/qg-util.c b/drivers/power/supply/qcom/qg-util.c
index a3e045e93f35..3b5065a1de25 100644
--- a/drivers/power/supply/qcom/qg-util.c
+++ b/drivers/power/supply/qcom/qg-util.c
@@ -342,6 +342,11 @@ int qg_get_battery_current(struct qpnp_qg *chip, int *ibat_ua)
return 0;
}
+ if (chip->dt.qg_vbms_mode) {
+ *ibat_ua = chip->vbms_ibat_ua;
+ return 0;
+ }
+
/* hold data */
rc = qg_masked_write(chip, chip->qg_base + QG_DATA_CTL2_REG,
BURST_AVG_HOLD_FOR_READ_BIT,
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 44abb71b7e83..5030352ea650 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1029,6 +1029,9 @@ static void process_udata_work(struct work_struct *work)
if (chip->udata.param[QG_FULL_SOC].valid)
chip->full_soc = chip->udata.param[QG_FULL_SOC].data;
+ if (chip->udata.param[QG_VBMS_IBAT].valid)
+ chip->vbms_ibat_ua = chip->udata.param[QG_VBMS_IBAT].data;
+
if (chip->udata.param[QG_SOC].valid ||
chip->udata.param[QG_SYS_SOC].valid) {
@@ -1573,7 +1576,8 @@ static int qg_get_ttf_param(void *data, enum ttf_param param, int *val)
switch (param) {
case TTF_VALID:
- *val = (!chip->battery_missing && chip->profile_loaded);
+ *val = (!chip->battery_missing && chip->profile_loaded &&
+ !chip->dt.qg_vbms_mode);
break;
case TTF_MSOC:
rc = qg_get_battery_capacity(chip, val);
@@ -1791,6 +1795,9 @@ static int qg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CC_SOC:
rc = qg_get_cc_soc(chip, &pval->intval);
break;
+ case POWER_SUPPLY_PROP_QG_VBMS_MODE:
+ pval->intval = !!chip->dt.qg_vbms_mode;
+ break;
default:
pr_debug("Unsupported property %d\n", psp);
break;
@@ -1842,6 +1849,7 @@ static enum power_supply_property qg_psy_props[] = {
POWER_SUPPLY_PROP_ESR_NOMINAL,
POWER_SUPPLY_PROP_SOH,
POWER_SUPPLY_PROP_CC_SOC,
+ POWER_SUPPLY_PROP_QG_VBMS_MODE,
};
static const struct power_supply_desc qg_psy_desc = {
@@ -2547,7 +2555,8 @@ static int qg_determine_pon_soc(struct qpnp_qg *chip)
int rc = 0, batt_temp = 0, i;
bool use_pon_ocv = true;
unsigned long rtc_sec = 0;
- u32 ocv_uv = 0, soc = 0, pon_soc = 0, shutdown[SDAM_MAX] = {0};
+ u32 ocv_uv = 0, soc = 0, pon_soc = 0, full_soc = 0, cutoff_soc = 0;
+ u32 shutdown[SDAM_MAX] = {0};
char ocv_type[20] = "NONE";
if (!chip->profile_loaded) {
@@ -2658,11 +2667,37 @@ use_pon_ocv:
}
ocv_uv = CAP(QG_MIN_OCV_UV, QG_MAX_OCV_UV, ocv_uv);
- rc = lookup_soc_ocv(&soc, ocv_uv, batt_temp, false);
+ rc = lookup_soc_ocv(&pon_soc, ocv_uv, batt_temp, false);
if (rc < 0) {
pr_err("Failed to lookup SOC@PON rc=%d\n", rc);
goto done;
}
+
+ rc = lookup_soc_ocv(&full_soc, chip->bp.float_volt_uv,
+ batt_temp, true);
+ if (rc < 0) {
+ pr_err("Failed to lookup FULL_SOC@PON rc=%d\n", rc);
+ goto done;
+ }
+ full_soc = CAP(0, 99, full_soc);
+
+ rc = lookup_soc_ocv(&cutoff_soc,
+ chip->dt.vbatt_cutoff_mv * 1000,
+ batt_temp, false);
+ if (rc < 0) {
+ pr_err("Failed to lookup CUTOFF_SOC@PON rc=%d\n", rc);
+ goto done;
+ }
+
+ if ((full_soc > cutoff_soc) && (pon_soc > cutoff_soc))
+ soc = DIV_ROUND_UP(((pon_soc - cutoff_soc) * 100),
+ (full_soc - cutoff_soc));
+ else
+ soc = pon_soc;
+
+ qg_dbg(chip, QG_DEBUG_PON, "v_float=%d v_cutoff=%d FULL_SOC=%d CUTOFF_SOC=%d PON_SYS_SOC=%d pon_soc=%d\n",
+ chip->bp.float_volt_uv, chip->dt.vbatt_cutoff_mv * 1000,
+ full_soc, cutoff_soc, soc, pon_soc);
}
done:
if (rc < 0) {
@@ -2812,6 +2847,9 @@ done_fifo:
}
}
+ if (chip->dt.qg_vbms_mode)
+ chip->dt.s3_entry_fifo_length = 1;
+
if (chip->dt.s3_entry_fifo_length != -EINVAL) {
if (chip->dt.s3_entry_fifo_length < 1)
chip->dt.s3_entry_fifo_length = 1;
@@ -2917,6 +2955,8 @@ done_fifo:
static int qg_post_init(struct qpnp_qg *chip)
{
+ u8 status = 0;
+
/* disable all IRQs if profile is not loaded */
if (!chip->profile_loaded) {
vote(chip->vbatt_irq_disable_votable,
@@ -2927,10 +2967,18 @@ static int qg_post_init(struct qpnp_qg *chip)
PROFILE_IRQ_DISABLE, true, 0);
}
+ if (chip->dt.qg_vbms_mode) {
+ chip->dt.esr_disable = true;
+ chip->dt.cl_disable = true;
+ }
+
/* restore ESR data */
if (!chip->dt.esr_disable)
qg_retrieve_esr_params(chip);
+ /* read STATUS2 register to clear its last state */
+ qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1);
+
return 0;
}
@@ -3411,18 +3459,24 @@ static int qg_parse_dt(struct qpnp_qg *chip)
chip->cl->dt.min_start_soc, chip->cl->dt.max_start_soc,
chip->cl->dt.min_temp, chip->cl->dt.max_temp);
}
- qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d ext-sns=%d\n",
+
+ chip->dt.qg_vbms_mode = of_property_read_bool(node,
+ "qcom,qg-vbms-mode");
+
+ qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d ext-sns=%d qg_vbms_mode=%d\n",
chip->dt.vbatt_empty_mv, chip->dt.vbatt_low_mv,
- chip->dt.delta_soc, chip->dt.qg_ext_sense);
+ chip->dt.delta_soc, chip->dt.qg_ext_sense,
+ chip->dt.qg_vbms_mode);
return 0;
}
static int process_suspend(struct qpnp_qg *chip)
{
- u8 status = 0;
+ u8 status = 0, val;
int rc;
u32 fifo_rt_length = 0, sleep_fifo_length = 0;
+ bool process_fifo = false;
/* skip if profile is not loaded */
if (!chip->profile_loaded)
@@ -3432,6 +3486,15 @@ static int process_suspend(struct qpnp_qg *chip)
chip->suspend_data = false;
+ val = (chip->seq_no % 128) + 1;
+ rc = qg_sdam_multibyte_write(QG_SDAM_SEQ_OFFSET, &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to write sdam seq, rc=%d\n", rc);
+ return rc;
+ }
+ /* read STATUS2 register to clear its last state */
+ qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1);
+
/* ignore any suspend processing if we are charging */
if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
qg_dbg(chip, QG_DEBUG_PM, "Charging @ suspend - ignore processing\n");
@@ -3456,7 +3519,13 @@ static int process_suspend(struct qpnp_qg *chip)
* the the #fifo to enter sleep, save the FIFO data
* and reset the fifo count.
*/
- if (fifo_rt_length >= (chip->dt.s2_fifo_length - sleep_fifo_length)) {
+ if (chip->dt.qg_vbms_mode && fifo_rt_length >= 1)
+ process_fifo = true;
+ else if (fifo_rt_length >=
+ (chip->dt.s2_fifo_length - sleep_fifo_length))
+ process_fifo = true;
+
+ if (process_fifo) {
rc = qg_master_hold(chip, true);
if (rc < 0) {
pr_err("Failed to hold master, rc=%d\n", rc);
@@ -3481,9 +3550,6 @@ static int process_suspend(struct qpnp_qg *chip)
chip->suspend_data = true;
}
- /* read STATUS2 register to clear its last state */
- qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1);
-
qg_dbg(chip, QG_DEBUG_PM, "FIFO rt_length=%d sleep_fifo_length=%d default_s2_count=%d suspend_data=%d\n",
fifo_rt_length, sleep_fifo_length,
chip->dt.s2_fifo_length, chip->suspend_data);
@@ -3493,7 +3559,7 @@ static int process_suspend(struct qpnp_qg *chip)
static int process_resume(struct qpnp_qg *chip)
{
- u8 status2 = 0, rt_status = 0;
+ u8 status2 = 0, rt_status = 0, val = 0;
u32 ocv_uv = 0, ocv_raw = 0;
int rc;
@@ -3549,6 +3615,11 @@ static int process_resume(struct qpnp_qg *chip)
chip->suspend_data = false;
}
+ rc = qg_sdam_multibyte_write(QG_SDAM_SEQ_OFFSET, &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to write sdam seq, rc=%d\n", rc);
+ return rc;
+ }
schedule_delayed_work(&chip->ttf->ttf_work, 0);
return rc;
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index fa59ebf1f877..a68f2b02830f 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -773,7 +773,9 @@ static void smblib_uusb_removal(struct smb_charger *chg)
struct storm_watch *wdata;
cancel_delayed_work_sync(&chg->pl_enable_work);
- alarm_cancel(&chg->chg_termination_alarm);
+
+ if (chg->wa_flags & CHG_TERMINATION_WA)
+ alarm_cancel(&chg->chg_termination_alarm);
if (chg->wa_flags & BOOST_BACK_WA) {
data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
@@ -3522,7 +3524,9 @@ static void typec_src_removal(struct smb_charger *chg)
}
cancel_delayed_work_sync(&chg->pl_enable_work);
- alarm_cancel(&chg->chg_termination_alarm);
+
+ if (chg->wa_flags & CHG_TERMINATION_WA)
+ alarm_cancel(&chg->chg_termination_alarm);
/* reset input current limit voters */
vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
@@ -4531,11 +4535,13 @@ int smblib_deinit(struct smb_charger *chg)
alarm_cancel(&chg->moisture_protection_alarm);
cancel_work_sync(&chg->moisture_protection_work);
}
- alarm_cancel(&chg->chg_termination_alarm);
+ if (chg->wa_flags & CHG_TERMINATION_WA) {
+ alarm_cancel(&chg->chg_termination_alarm);
+ cancel_work_sync(&chg->chg_termination_work);
+ }
cancel_work_sync(&chg->bms_update_work);
cancel_work_sync(&chg->jeita_update_work);
cancel_work_sync(&chg->pl_update_work);
- cancel_work_sync(&chg->chg_termination_work);
cancel_delayed_work_sync(&chg->clear_hdc_work);
cancel_delayed_work_sync(&chg->icl_change_work);
cancel_delayed_work_sync(&chg->pl_enable_work);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e8d860df7c79..d70fc6bb9d72 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,7 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.c
* Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -3384,7 +3384,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
}
- if (err)
+ if (err && err != -EAGAIN)
ufsdbg_set_err_state(hba);
return err;
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 5640666b3cea..f131c79abeaa 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -483,6 +483,8 @@ static int fifo_read(struct edge_info *einfo, void *_data, int len)
uint32_t fifo_size = einfo->rx_fifo_size;
uint32_t n;
+ if (read_index >= fifo_size || write_index >= fifo_size)
+ return 0;
while (len) {
ptr = einfo->rx_fifo + read_index;
if (read_index <= write_index)
@@ -529,6 +531,8 @@ static int fifo_write_body(struct edge_info *einfo, const void *_data,
uint32_t fifo_size = einfo->tx_fifo_size;
uint32_t n;
+ if (read_index >= fifo_size || *write_index >= fifo_size)
+ return 0;
while (len) {
ptr = einfo->tx_fifo + *write_index;
if (*write_index < read_index) {
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 3000cf3ecded..8f965e95bddb 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -1214,8 +1214,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
case PE_SRC_NEGOTIATE_CAPABILITY:
if (PD_RDO_OBJ_POS(pd->rdo) != 1 ||
PD_RDO_FIXED_CURR(pd->rdo) >
- PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps) ||
- PD_RDO_FIXED_CURR_MINMAX(pd->rdo) >
PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps)) {
/* send Reject */
ret = pd_send_msg(pd, MSG_REJECT, NULL, 0, SOP_MSG);
diff --git a/drivers/video/fbdev/msm/mdp3.c b/drivers/video/fbdev/msm/mdp3.c
index e1e7d94d16bd..f8e44580df41 100644
--- a/drivers/video/fbdev/msm/mdp3.c
+++ b/drivers/video/fbdev/msm/mdp3.c
@@ -1354,6 +1354,8 @@ int mdp3_put_img(struct mdp3_img_data *data, int client)
pr_err("invalid ion client\n");
return -ENOMEM;
}
+ MDSS_XLOG(data->srcp_dma_buf, data->addr, data->len, client,
+ data->mapped, data->skip_detach);
if (data->mapped) {
if (client == MDP3_CLIENT_PPP ||
client == MDP3_CLIENT_DMA_P)
@@ -1520,6 +1522,13 @@ done:
mdp3_put_img(data, client);
return -EINVAL;
}
+ if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
+ MDSS_XLOG(img->memory_id, data->addr, data->len, fb_num);
+ } else if (iclient) {
+ MDSS_XLOG(img->memory_id, data->srcp_dma_buf, data->addr,
+ data->len, client, data->mapped,
+ data->skip_detach);
+ }
return ret;
clone_err:
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index 0f4a453fddea..ff7d7126df85 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -264,7 +264,7 @@ static void mdp3_vsync_retire_handle_vsync(void *arg)
schedule_work(&mdp3_session->retire_work);
}
-static void mdp3_vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
+void mdp3_vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
{
struct mdp3_session_data *mdp3_session;
@@ -274,6 +274,10 @@ static void mdp3_vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
if (mdp3_session->retire_cnt > 0) {
mdss_inc_timeline(mfd->mdp_sync_pt_data.timeline_retire, val);
mdp3_session->retire_cnt -= min(val, mdp3_session->retire_cnt);
+ pr_debug("Retire signaled! timeline val=%d remaining=%d\n",
+ mdss_get_timeline_retire_ts(
+ mfd->mdp_sync_pt_data.timeline_retire),
+ mdp3_session->retire_cnt);
}
mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
}
@@ -956,7 +960,7 @@ static int mdp3_ctrl_on(struct msm_fb_data_type *mfd)
MDSS_EVENT_UNBLANK, NULL);
rc |= panel->event_handler(panel,
MDSS_EVENT_PANEL_ON, NULL);
- if (mdss_fb_is_power_on_ulp(mfd))
+ if (mdss_fb_is_power_on_lp(mfd))
rc |= mdp3_enable_panic_ctrl();
mdp3_clk_enable(0, 0);
}
@@ -1095,7 +1099,7 @@ static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
*/
pm_runtime_get_sync(&mdp3_res->pdev->dev);
- MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mdss_fb_is_power_on_ulp(mfd),
+ MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mdss_fb_is_power_on_lp(mfd),
mfd->panel_power_state);
panel = mdp3_session->panel;
@@ -1236,9 +1240,9 @@ static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
}
}
- if (mdss_fb_is_power_on_ulp(mfd) &&
+ if (mdss_fb_is_power_on_lp(mfd) &&
(mfd->panel.type == MIPI_CMD_PANEL)) {
- pr_debug("%s: Disable MDP3 clocks in ULP\n", __func__);
+ pr_debug("%s: Disable MDP3 clocks in LP\n", __func__);
if (!mdp3_session->clk_on)
mdp3_ctrl_clk_enable(mfd, 1);
/*
@@ -1248,7 +1252,7 @@ static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
rc = mdp3_session->dma->stop(mdp3_session->dma,
mdp3_session->intf);
if (rc)
- pr_warn("fail to stop the MDP3 dma in ULP\n");
+ pr_warn("fail to stop the MDP3 dma in LP\n");
/* Wait to ensure TG to turn off */
msleep(20);
/*
@@ -3029,7 +3033,7 @@ int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
mdp3_interface->lut_update = NULL;
mdp3_interface->configure_panel = mdp3_update_panel_info;
mdp3_interface->input_event_handler = NULL;
- mdp3_interface->signal_retire_fence = NULL;
+ mdp3_interface->signal_retire_fence = mdp3_vsync_retire_signal;
mdp3_interface->is_twm_en = mdp3_is_twm_en;
mdp3_session = kzalloc(sizeof(struct mdp3_session_data), GFP_KERNEL);
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.h b/drivers/video/fbdev/msm/mdp3_ctrl.h
index 7fe2e5876140..032debe1aa85 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.h
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.h
@@ -36,6 +36,9 @@ struct mdp3_buffer_queue {
int pop_idx;
};
+/* struct mdp3_session_data is MDP3 fb private data */
+#define mfd_to_mdp3_data(mfd) (mfd->mdp.private1)
+
struct mdp3_session_data {
struct mutex lock;
int status;
@@ -93,5 +96,6 @@ int mdp3_ctrl_get_pack_pattern(u32 imgType);
int mdp3_ctrl_reset(struct msm_fb_data_type *mfd);
int mdp3_get_ion_client(struct msm_fb_data_type *mfd);
void mdp3_flush_dma_done(struct mdp3_session_data *mdp3_session);
+void mdp3_vsync_retire_signal(struct msm_fb_data_type *mfd, int val);
#endif /* MDP3_CTRL_H */
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
index dce3940cd9ee..0ada74b4f65f 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.c
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -688,7 +688,11 @@ retry_dma_done:
}
rc = -1;
}
- ATRACE_END("mdp3_wait_for_dma_comp");
+ ATRACE_END("mdp3_wait_for_dma_comp");
+ if (rc <= 0 && retry_count == 0) {
+ MDSS_XLOG_TOUT_HANDLER("mdp", "vbif",
+ "dsi0_ctrl", "dsi0_phy");
+ }
}
}
if (dma->update_src_cfg) {
@@ -1068,12 +1072,17 @@ static int mdp3_dma_stop(struct mdp3_dma *dma, struct mdp3_intf *intf)
/*
* Interrupts are disabled.
- * Check for blocked dma done interrupt.
- * Flush items waiting for dma done interrupt.
+ * Check for blocked dma done and vsync interrupt.
+ * Flush items waiting for interrupts.
*/
- if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD &&
- atomic_read(&dma->session->dma_done_cnt))
- mdp3_flush_dma_done(dma->session);
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+ if (atomic_read(&dma->session->dma_done_cnt))
+ mdp3_flush_dma_done(dma->session);
+ if (dma->session->retire_cnt) {
+ mdp3_vsync_retire_signal(dma->session->mfd,
+ dma->session->retire_cnt);
+ }
+ }
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.c b/drivers/video/fbdev/msm/mdp3_ppp.c
index 545951094f73..48863205829e 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp.c
+++ b/drivers/video/fbdev/msm/mdp3_ppp.c
@@ -318,10 +318,12 @@ int mdp3_ppp_pipe_wait(void)
*/
ret = wait_for_completion_timeout(
&ppp_stat->ppp_comp, msecs_to_jiffies(200));
- if (!ret)
+ if (!ret) {
pr_err("%s: Timed out waiting for the MDP.\n",
__func__);
-
+ MDSS_XLOG_TOUT_HANDLER("mdp", "vbif",
+ "dsi0_ctrl", "dsi0_phy");
+ }
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index a31c95fb1bf9..631c1ecd4f5c 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -1859,6 +1859,20 @@ static void mdss_panel_validate_debugfs_info(struct msm_fb_data_type *mfd)
}
}
+static void mdss_fb_signal_retire_fence(struct msm_fb_data_type *mfd)
+{
+#ifdef TARGET_HW_MDSS_MDP3
+ struct mdp3_session_data *mdp3_session = mfd_to_mdp3_data(mfd);
+ int retire_cnt = mdp3_session->retire_cnt;
+#else
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ int retire_cnt = mdp5_data->retire_cnt;
+#endif
+
+ if (mfd->mdp.signal_retire_fence)
+ mfd->mdp.signal_retire_fence(mfd, retire_cnt);
+}
+
static int mdss_fb_blank_blank(struct msm_fb_data_type *mfd,
int req_power_state)
{
@@ -1906,10 +1920,13 @@ static int mdss_fb_blank_blank(struct msm_fb_data_type *mfd,
mfd->panel_power_state = req_power_state;
ret = mfd->mdp.off_fnc(mfd);
- if (ret)
+ if (ret) {
mfd->panel_power_state = cur_power_state;
- else if (!mdss_panel_is_power_on_interactive(req_power_state))
+ } else if (!mdss_panel_is_power_on_interactive(req_power_state)) {
mdss_fb_release_fences(mfd);
+ if (mfd->panel.type == MIPI_CMD_PANEL)
+ mdss_fb_signal_retire_fence(mfd);
+ }
mfd->op_enable = true;
complete(&mfd->power_off_comp);
@@ -3696,9 +3713,8 @@ static int __mdss_fb_perform_commit(struct msm_fb_data_type *mfd)
*/
mdss_fb_release_kickoff(mfd);
mdss_fb_signal_timeline(sync_pt_data);
- if ((mfd->panel.type == MIPI_CMD_PANEL) &&
- (mfd->mdp.signal_retire_fence))
- mfd->mdp.signal_retire_fence(mfd, 1);
+ if (mfd->panel.type == MIPI_CMD_PANEL)
+ mdss_fb_signal_retire_fence(mfd);
return ret;
}
if (!sync_pt_data->async_wait_fences)
@@ -3756,9 +3772,8 @@ skip_commit:
if (IS_ERR_VALUE((unsigned long)ret) || !sync_pt_data->flushed) {
mdss_fb_release_kickoff(mfd);
mdss_fb_signal_timeline(sync_pt_data);
- if ((mfd->panel.type == MIPI_CMD_PANEL) &&
- (mfd->mdp.signal_retire_fence))
- mfd->mdp.signal_retire_fence(mfd, 1);
+ if (mfd->panel.type == MIPI_CMD_PANEL)
+ mdss_fb_signal_retire_fence(mfd);
}
if (dynamic_dsi_switch) {
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index c213f70942d5..a9a878e6bcec 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -517,7 +517,8 @@ int mdss_smmu_fault_handler(struct iommu_domain *domain, struct device *dev,
} else {
pr_err("mdss_smmu: iova:0x%lx flags:0x%x\n",
iova, flags);
- MDSS_XLOG_TOUT_HANDLER("mdp");
+ MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "dsi0_ctrl",
+ "dsi0_phy");
}
end:
return -ENODEV;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 6ec5e52e16ec..93b79a4b1dcc 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -304,6 +304,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE,
POWER_SUPPLY_PROP_CC_SOC,
+ POWER_SUPPLY_PROP_QG_VBMS_MODE,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 99948cbb198d..b059fd2cffc2 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -34,6 +34,8 @@ extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
extern unsigned int sysctl_sched_walt_rotate_big_tasks;
+extern unsigned int sysctl_sched_min_task_util_for_boost_colocation;
+extern unsigned int sysctl_sched_little_cluster_coloc_fmin_khz;
extern int
walt_proc_update_handler(struct ctl_table *table, int write,
@@ -135,4 +137,9 @@ extern int sysctl_schedstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+#ifdef CONFIG_SCHED_WALT
+extern int sched_little_cluster_coloc_fmin_khz_handler(struct ctl_table *table,
+ int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+#endif
#endif /* _SCHED_SYSCTL_H */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b3ec962785f8..be3d8b29405b 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -596,8 +596,14 @@ DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
TRACE_EVENT(sched_load_to_gov,
- TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load, u64 freq_aggr_thresh, u64 load, int policy, int big_task_rotation),
- TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr_thresh, load, policy, big_task_rotation),
+ TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
+ u64 freq_aggr_thresh, u64 load, int policy,
+ int big_task_rotation,
+ unsigned int sysctl_sched_little_cluster_coloc_fmin_khz,
+ u64 coloc_boost_load),
+ TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr_thresh, load, policy,
+ big_task_rotation, sysctl_sched_little_cluster_coloc_fmin_khz,
+ coloc_boost_load),
TP_STRUCT__entry(
__field( int, cpu )
@@ -613,6 +619,9 @@ TRACE_EVENT(sched_load_to_gov,
__field( u64, pl )
__field( u64, load )
__field( int, big_task_rotation )
+ __field(unsigned int,
+ sysctl_sched_little_cluster_coloc_fmin_khz)
+ __field( u64, coloc_boost_load )
),
TP_fast_assign(
@@ -629,14 +638,19 @@ TRACE_EVENT(sched_load_to_gov,
__entry->pl = rq->walt_stats.pred_demands_sum;
__entry->load = load;
__entry->big_task_rotation = big_task_rotation;
+ __entry->sysctl_sched_little_cluster_coloc_fmin_khz =
+ sysctl_sched_little_cluster_coloc_fmin_khz;
+ __entry->coloc_boost_load = coloc_boost_load;
),
- TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr_thresh=%llu tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d",
+ TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr_thresh=%llu tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d sysctl_sched_little_cluster_coloc_fmin_khz=%u coloc_boost_load=%llu",
__entry->cpu, __entry->policy, __entry->ed_task_pid,
__entry->aggr_grp_load, __entry->freq_aggr_thresh,
__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
- __entry->big_task_rotation)
+ __entry->big_task_rotation,
+ __entry->sysctl_sched_little_cluster_coloc_fmin_khz,
+ __entry->coloc_boost_load)
);
#endif
diff --git a/include/uapi/linux/qg.h b/include/uapi/linux/qg.h
index 54aa36261980..8bd220fbc6a8 100644
--- a/include/uapi/linux/qg.h
+++ b/include/uapi/linux/qg.h
@@ -21,7 +21,7 @@ enum qg {
QG_FULL_SOC,
QG_CLEAR_LEARNT_DATA,
QG_SYS_SOC,
- QG_RESERVED_10,
+ QG_VBMS_IBAT,
QG_MAX,
};
@@ -34,6 +34,7 @@ enum qg {
#define QG_FULL_SOC QG_FULL_SOC
#define QG_CLEAR_LEARNT_DATA QG_CLEAR_LEARNT_DATA
#define QG_SYS_SOC QG_SYS_SOC
+#define QG_VBMS_IBAT QG_VBMS_IBAT
struct fifo_data {
unsigned int v;
diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h
index e006463d2aa6..db433542860b 100644
--- a/include/uapi/media/cam_defs.h
+++ b/include/uapi/media/cam_defs.h
@@ -18,6 +18,10 @@
#define CAM_FLUSH_REQ (CAM_COMMON_OPCODE_BASE + 0x8)
#define CAM_COMMON_OPCODE_MAX (CAM_COMMON_OPCODE_BASE + 0x9)
+#define CAM_COMMON_OPCODE_BASE_v2 0x150
+#define CAM_ACQUIRE_HW (CAM_COMMON_OPCODE_BASE_v2 + 0x1)
+#define CAM_RELEASE_HW (CAM_COMMON_OPCODE_BASE_v2 + 0x2)
+
#define CAM_EXT_OPCODE_BASE 0x200
#define CAM_CONFIG_DEV_EXTERNAL (CAM_EXT_OPCODE_BASE + 0x1)
@@ -43,6 +47,9 @@
#define CAM_CMD_BUF_GENERIC 0x9
#define CAM_CMD_BUF_LEGACY 0xA
+/* UBWC API Version */
+#define CAM_UBWC_CFG_VERSION_1 1
+
/**
* enum flush_type_t - Identifies the various flush types
*
@@ -236,6 +243,34 @@ struct cam_plane_cfg {
};
/**
+ * struct cam_ubwc_plane_cfg_v1 - UBWC Plane configuration info
+ *
+ * @port_type: Port Type
+ * @meta_stride: UBWC metadata stride
+ * @meta_size: UBWC metadata plane size
+ * @meta_offset: UBWC metadata offset
+ * @packer_config: UBWC packer config
+ * @mode_config_0: UBWC mode config 0
+ * @mode_config_1: UBWC 3 mode config 1
+ * @tile_config: UBWC tile config
+ * @h_init: UBWC horizontal initial coordinate in pixels
+ * @v_init: UBWC vertical initial coordinate in lines
+ *
+ */
+struct cam_ubwc_plane_cfg_v1 {
+ uint32_t port_type;
+ uint32_t meta_stride;
+ uint32_t meta_size;
+ uint32_t meta_offset;
+ uint32_t packer_config;
+ uint32_t mode_config_0;
+ uint32_t mode_config_1;
+ uint32_t tile_config;
+ uint32_t h_init;
+ uint32_t v_init;
+};
+
+/**
* struct cam_cmd_buf_desc - Command buffer descriptor
*
* @mem_handle: Command buffer handle
@@ -452,6 +487,73 @@ struct cam_acquire_dev_cmd {
uint64_t resource_hdl;
};
+/*
+ * In old version, while acquiring device the num_resources in
+ * struct cam_acquire_dev_cmd will be a valid value. During ACQUIRE_DEV
+ * KMD driver will return dev_handle as well as associate HW to handle.
+ * If num_resources is set to the constant below, we are using
+ * the new version and we do not acquire HW in ACQUIRE_DEV IOCTL.
+ * ACQUIRE_DEV will only return handle and we should receive
+ * ACQUIRE_HW IOCTL after ACQUIRE_DEV and that is when the HW
+ * is associated with the dev_handle.
+ *
+ * (Data type): uint32_t
+ */
+#define CAM_API_COMPAT_CONSTANT 0xFEFEFEFE
+
+#define CAM_ACQUIRE_HW_STRUCT_VERSION_1 1
+
+/**
+ * struct cam_acquire_hw_cmd_v1 - Control payload for acquire HW IOCTL (Ver 1)
+ *
+ * @struct_version: = CAM_ACQUIRE_HW_STRUCT_VERSION_1 for this struct
+ * This value should be the first 32-bits in any structure
+ * related to this IOCTL. So that if the struct needs to
+ * change, we can first read the starting 32-bits, get the
+ * version number and then typecast the data to struct
+ * accordingly.
+ * @reserved: Reserved field for 64-bit alignment
+ * @session_handle: Session handle for the acquire command
+ * @dev_handle: Device handle to be returned
+ * @handle_type: Tells you how to interpret the variable resource_hdl-
+ * 1 = user pointer, 2 = mem handle
+ * @data_size: Total size of data contained in memory pointed
+ * to by resource_hdl
+ * @resource_hdl: Resource handle that refers to the actual
+ * resource data.
+ */
+struct cam_acquire_hw_cmd_v1 {
+ uint32_t struct_version;
+ uint32_t reserved;
+ int32_t session_handle;
+ int32_t dev_handle;
+ uint32_t handle_type;
+ uint32_t data_size;
+ uint64_t resource_hdl;
+};
+
+#define CAM_RELEASE_HW_STRUCT_VERSION_1 1
+
+/**
+ * struct cam_release_hw_cmd_v1 - Control payload for release HW IOCTL (Ver 1)
+ *
+ * @struct_version: = CAM_RELEASE_HW_STRUCT_VERSION_1 for this struct
+ * This value should be the first 32-bits in any structure
+ * related to this IOCTL. So that if the struct needs to
+ * change, we can first read the starting 32-bits, get the
+ * version number and then typecast the data to struct
+ * accordingly.
+ * @reserved: Reserved field for 64-bit alignment
+ * @session_handle: Session handle for the release
+ * @dev_handle: Device handle for the release
+ */
+struct cam_release_hw_cmd_v1 {
+ uint32_t struct_version;
+ uint32_t reserved;
+ int32_t session_handle;
+ int32_t dev_handle;
+};
+
/**
* struct cam_flush_dev_cmd - Control payload for flush devices
*
@@ -474,4 +576,53 @@ struct cam_flush_dev_cmd {
int64_t req_id;
};
+/**
+ * struct cam_ubwc_config - UBWC Configuration Payload
+ *
+ * @api_version: UBWC config api version
+ * @num_ports: Number of ports to be configured
+ * @ubwc_plane_config: Array of UBWC configurations per port
+ * Size [CAM_PACKET_MAX_PLANES - 1] per port
+ * as UBWC is supported on Y & C planes
+ * and therefore a max size of 2 planes
+ *
+ */
+struct cam_ubwc_config {
+ uint32_t api_version;
+ uint32_t num_ports;
+ struct cam_ubwc_plane_cfg_v1
+ ubwc_plane_cfg[1][CAM_PACKET_MAX_PLANES - 1];
+};
+
+/**
+ * struct cam_cmd_mem_region_info -
+ * Cmd buffer region info
+ *
+ * @mem_handle : Memory handle of the region
+ * @offset : Offset if any
+ * @size : Size of the region
+ * @flags : Flags if any
+ */
+struct cam_cmd_mem_region_info {
+ int32_t mem_handle;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t flags;
+};
+
+/**
+ * struct cam_cmd_mem_regions -
+ * List of multiple memory descriptors of
+ * of different regions
+ *
+ * @version : Version number
+ * @num_regions : Number of regions
+ * @map_info_array : Array of all the regions
+ */
+struct cam_cmd_mem_regions {
+ uint32_t version;
+ uint32_t num_regions;
+ struct cam_cmd_mem_region_info map_info_array[1];
+};
+
#endif /* __UAPI_CAM_DEFS_H__ */
diff --git a/include/uapi/media/cam_icp.h b/include/uapi/media/cam_icp.h
index 680d05b630a6..f2c1c91692d4 100644
--- a/include/uapi/media/cam_icp.h
+++ b/include/uapi/media/cam_icp.h
@@ -20,6 +20,8 @@
/* packet opcode types */
#define CAM_ICP_OPCODE_IPE_UPDATE 0
#define CAM_ICP_OPCODE_BPS_UPDATE 1
+#define CAM_ICP_OPCODE_IPE_SETTINGS 2
+#define CAM_ICP_OPCODE_BPS_SETTINGS 3
/* IPE input port resource type */
#define CAM_ICP_IPE_INPUT_IMAGE_FULL 0x0
@@ -62,6 +64,8 @@
/* Generic blob types */
#define CAM_ICP_CMD_GENERIC_BLOB_CLK 0x1
#define CAM_ICP_CMD_GENERIC_BLOB_CFG_IO 0x2
+#define CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_MAP 0x3
+#define CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_UNMAP 0x4
/**
* struct cam_icp_clk_bw_request
diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h
index afd109fcb471..2344b223ec0d 100644
--- a/include/uapi/media/cam_isp.h
+++ b/include/uapi/media/cam_isp.h
@@ -84,9 +84,11 @@
#define CAM_ISP_DSP_MODE_ROUND 2
/* ISP Generic Cmd Buffer Blob types */
-#define CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG 0
-#define CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG 1
-#define CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG 2
+#define CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG 0
+#define CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG 1
+#define CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG 2
+#define CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG 3
+#define CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG 4
/* Query devices */
/**
@@ -342,6 +344,15 @@ struct cam_isp_clock_config {
} __attribute__((packed));
/**
+ * struct cam_isp_csid_clock_config - CSID clock configuration
+ *
+ * @csid_clock CSID clock
+ */
+struct cam_isp_csid_clock_config {
+ uint64_t csid_clock;
+} __attribute__((packed));
+
+/**
* struct cam_isp_bw_vote - Bandwidth vote information
*
* @resource_id: Resource ID
@@ -375,4 +386,38 @@ struct cam_isp_bw_config {
struct cam_isp_bw_vote rdi_vote[1];
} __attribute__((packed));
+
+/* Acquire Device/HW v2 */
+
+/**
+ * struct cam_isp_acquire_hw_info - ISP acquire HW params
+ *
+ * @common_info_version : Version of common info struct used
+ * @common_info_size : Size of common info struct used
+ * @common_info_offset : Offset of common info from start of data
+ * @num_inputs : Number of inputs
+ * @input_info_version : Version of input info struct used
+ * @input_info_size : Size of input info struct used
+ * @input_info_offset : Offset of input info from start of data
+ * @data : Start of data region
+ */
+struct cam_isp_acquire_hw_info {
+ uint16_t common_info_version;
+ uint16_t common_info_size;
+ uint32_t common_info_offset;
+ uint32_t num_inputs;
+ uint32_t input_info_version;
+ uint32_t input_info_size;
+ uint32_t input_info_offset;
+ uint64_t data;
+};
+
+#define CAM_ISP_ACQUIRE_COMMON_VER0 0x1000
+
+#define CAM_ISP_ACQUIRE_COMMON_SIZE_VER0 0x0
+
+#define CAM_ISP_ACQUIRE_INPUT_VER0 0x2000
+
+#define CAM_ISP_ACQUIRE_OUT_VER0 0x3000
+
#endif /* __UAPI_CAM_ISP_H__ */
diff --git a/include/uapi/media/cam_isp_ife.h b/include/uapi/media/cam_isp_ife.h
index f5e72813fc0d..b806befa2bed 100644
--- a/include/uapi/media/cam_isp_ife.h
+++ b/include/uapi/media/cam_isp_ife.h
@@ -23,10 +23,13 @@
#define CAM_ISP_IFE_OUT_RES_STATS_RS (CAM_ISP_IFE_OUT_RES_BASE + 16)
#define CAM_ISP_IFE_OUT_RES_STATS_CS (CAM_ISP_IFE_OUT_RES_BASE + 17)
#define CAM_ISP_IFE_OUT_RES_STATS_IHIST (CAM_ISP_IFE_OUT_RES_BASE + 18)
-#define CAM_ISP_IFE_OUT_RES_MAX (CAM_ISP_IFE_OUT_RES_BASE + 19)
+#define CAM_ISP_IFE_OUT_RES_FULL_DISP (CAM_ISP_IFE_OUT_RES_BASE + 19)
+#define CAM_ISP_IFE_OUT_RES_DS4_DISP (CAM_ISP_IFE_OUT_RES_BASE + 20)
+#define CAM_ISP_IFE_OUT_RES_DS16_DISP (CAM_ISP_IFE_OUT_RES_BASE + 21)
+#define CAM_ISP_IFE_OUT_RES_2PD (CAM_ISP_IFE_OUT_RES_BASE + 22)
+#define CAM_ISP_IFE_OUT_RES_MAX (CAM_ISP_IFE_OUT_RES_BASE + 23)
-
-/* IFE input port resource type (global unique) */
+/*IFE input port resource type (global unique) */
#define CAM_ISP_IFE_IN_RES_BASE 0x4000
#define CAM_ISP_IFE_IN_RES_TPG (CAM_ISP_IFE_IN_RES_BASE + 0)
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index ae65649964ff..0b4f1cc40df3 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -244,6 +244,7 @@ struct cam_req_mgr_link_control {
#define CAM_MEM_FLAG_PACKET_BUF_TYPE (1<<9)
#define CAM_MEM_FLAG_CACHE (1<<10)
#define CAM_MEM_FLAG_HW_SHARED_ACCESS (1<<11)
+#define CAM_MEM_FLAG_CDSP_OUTPUT (1<<12)
#define CAM_MEM_MMU_MAX_HANDLE 16
diff --git a/include/uapi/media/msmb_isp.h b/include/uapi/media/msmb_isp.h
index 74a8d9359d34..271a73102f26 100644
--- a/include/uapi/media/msmb_isp.h
+++ b/include/uapi/media/msmb_isp.h
@@ -26,9 +26,16 @@
#define ISP_STATS_STREAM_BIT 0x80000000
#define VFE_HW_LIMIT 1
+#define ISP_KERNEL_STATE 1
struct msm_vfe_cfg_cmd_list;
+struct isp_kstate {
+ uint32_t kernel_sofid;
+ uint32_t drop_reconfig;
+ uint32_t vfeid;
+};
+
enum ISP_START_PIXEL_PATTERN {
ISP_BAYER_RGRGRG,
ISP_BAYER_GRGRGR,
diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c
index 01daf82926ce..08ada300edc5 100644
--- a/kernel/sched/energy.c
+++ b/kernel/sched/energy.c
@@ -301,6 +301,8 @@ static int sched_energy_probe(struct platform_device *pdev)
walt_sched_energy_populated_callback();
}
+ walt_map_freq_to_load();
+
dev_info(&pdev->dev, "Sched-energy-costs capacity updated\n");
return 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e33d11dcf4a9..e213b3e498d8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -166,6 +166,10 @@ unsigned int sysctl_sched_capacity_margin = 1078; /* ~5% margin */
unsigned int sysctl_sched_capacity_margin_down = 1205; /* ~15% margin */
#define capacity_margin sysctl_sched_capacity_margin
+#ifdef CONFIG_SCHED_WALT
+unsigned int sysctl_sched_min_task_util_for_boost_colocation;
+#endif
+
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
@@ -6156,8 +6160,7 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
if (capacity == max_capacity)
return true;
- if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
- task_sched_boost(p))
+ if (task_boost_policy(p) == SCHED_BOOST_ON_BIG)
return false;
return __task_fits(p, cpu, 0);
@@ -6823,7 +6826,7 @@ static int cpu_util_wake(int cpu, struct task_struct *p)
struct find_best_target_env {
struct cpumask *rtg_target;
bool need_idle;
- bool placement_boost;
+ int placement_boost;
bool avoid_prev_cpu;
};
@@ -6875,37 +6878,30 @@ static bool is_packing_eligible(struct task_struct *p, int target_cpu,
return (estimated_capacity <= capacity_curr_of(target_cpu));
}
-static inline bool skip_sg(struct task_struct *p, struct sched_group *sg,
- struct cpumask *rtg_target)
+static int start_cpu(struct task_struct *p, bool boosted,
+ struct cpumask *rtg_target)
{
- int fcpu = group_first_cpu(sg);
-
- /* Are all CPUs isolated in this group? */
- if (!sg->group_weight)
- return true;
-
- /*
- * Don't skip a group if a task affinity allows it
- * to run only on that group.
- */
- if (cpumask_subset(tsk_cpus_allowed(p), sched_group_cpus(sg)))
- return false;
+ struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+ int start_cpu = -1;
- if (!task_fits_max(p, fcpu))
- return true;
+ if (boosted)
+ return rd->max_cap_orig_cpu;
- if (rtg_target && !cpumask_test_cpu(fcpu, rtg_target))
- return true;
+ /* A task always fits on its rtg_target */
+ if (rtg_target) {
+ int rtg_target_cpu = cpumask_first_and(rtg_target,
+ cpu_online_mask);
- return false;
-}
-
-static int start_cpu(bool boosted)
-{
- struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
- int start_cpu;
+ if (rtg_target_cpu < nr_cpu_ids)
+ return rtg_target_cpu;
+ }
- start_cpu = boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
+ /* Where the task should land based on its demand */
+ if (rd->min_cap_orig_cpu != -1
+ && task_fits_max(p, rd->min_cap_orig_cpu))
+ start_cpu = rd->min_cap_orig_cpu;
+ else
+ start_cpu = rd->max_cap_orig_cpu;
return walt_start_cpu(start_cpu);
}
@@ -6929,6 +6925,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
int best_idle_cpu = -1;
int target_cpu = -1;
int cpu, i;
+ long spare_cap, most_spare_cap = 0;
+ int most_spare_cap_cpu = -1;
unsigned int active_cpus_count = 0;
int isolated_candidate = -1;
int prev_cpu = task_cpu(p);
@@ -6939,7 +6937,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
schedstat_inc(this_rq()->eas_stats.fbt_attempts);
/* Find start CPU based on boost value */
- cpu = start_cpu(boosted);
+ cpu = start_cpu(p, boosted, fbt_env->rtg_target);
if (cpu < 0) {
schedstat_inc(p->se.statistics.nr_wakeups_fbt_no_cpu);
schedstat_inc(this_rq()->eas_stats.fbt_no_cpu);
@@ -6960,9 +6958,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
cpumask_t search_cpus;
bool do_rotate = false, avoid_prev_cpu = false;
- if (skip_sg(p, sg, fbt_env->rtg_target))
- continue;
-
cpumask_copy(&search_cpus, tsk_cpus_allowed(p));
cpumask_and(&search_cpus, &search_cpus, sched_group_cpus(sg));
i = find_first_cpu_bit(p, &search_cpus, sg, &avoid_prev_cpu,
@@ -6997,6 +6992,12 @@ retry:
*/
wake_util = cpu_util_wake(i, p);
new_util = wake_util + task_util(p);
+ spare_cap = capacity_orig_of(i) - wake_util;
+
+ if (spare_cap > most_spare_cap) {
+ most_spare_cap = spare_cap;
+ most_spare_cap_cpu = i;
+ }
/*
* Ensure minimum capacity to grant the required boost.
@@ -7215,7 +7216,7 @@ retry:
if (best_idle_cpu != -1)
break;
- if (fbt_env->placement_boost) {
+ if (fbt_env->placement_boost != SCHED_BOOST_NONE) {
target_capacity = ULONG_MAX;
continue;
}
@@ -7232,6 +7233,13 @@ retry:
target_capacity = ULONG_MAX;
}
+ /*
+ * if we have found a target cpu within a group, don't bother
+ * checking other groups.
+ */
+ if (target_capacity != ULONG_MAX)
+ break;
+
} while (sg = sg->next, sg != sd->groups);
if (best_idle_cpu != -1 && !is_packing_eligible(p, target_cpu, fbt_env,
@@ -7272,6 +7280,11 @@ retry:
? best_active_cpu
: best_idle_cpu;
+ if (target_cpu == -1 && most_spare_cap_cpu != -1 &&
+ /* ensure we use active cpu for active migration */
+ !(p->state == TASK_RUNNING && !idle_cpu(most_spare_cap_cpu)))
+ target_cpu = most_spare_cap_cpu;
+
if (target_cpu == -1 && cpu_isolated(prev_cpu) &&
isolated_candidate != -1) {
target_cpu = isolated_candidate;
@@ -7385,7 +7398,9 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p)
rcu_read_lock();
grp = task_related_thread_group(p);
- if (grp && grp->preferred_cluster) {
+ if (grp && grp->preferred_cluster &&
+ (task_util(p) >
+ sysctl_sched_min_task_util_for_boost_colocation)) {
rtg_target = &grp->preferred_cluster->cpus;
if (!task_fits_max(p, cpumask_first(rtg_target)))
rtg_target = NULL;
@@ -7443,9 +7458,8 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
} else {
fbt_env.need_idle = wake_to_idle(p);
}
- fbt_env.placement_boost = task_sched_boost(p) ?
- sched_boost_policy() != SCHED_BOOST_NONE :
- false;
+
+ fbt_env.placement_boost = task_boost_policy(p);
fbt_env.avoid_prev_cpu = false;
if (prefer_idle || fbt_env.need_idle)
@@ -8368,8 +8382,12 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
return 0;
}
+	/* Record that we found at least one task that could run on dst_cpu */
+ env->flags &= ~LBF_ALL_PINNED;
+
if (energy_aware() && !env->dst_rq->rd->overutilized &&
- env->idle == CPU_NEWLY_IDLE) {
+ env->idle == CPU_NEWLY_IDLE &&
+ !task_in_related_thread_group(p)) {
long util_cum_dst, util_cum_src;
unsigned long demand;
@@ -8381,9 +8399,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
return 0;
}
- /* Record that we found atleast one task that could run on dst_cpu */
- env->flags &= ~LBF_ALL_PINNED;
-
#ifdef CONFIG_SCHED_WALT
if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
!preferred_cluster(cpu_rq(env->dst_cpu)->cluster, p))
@@ -9644,8 +9659,23 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* a think about bumping its value to force at least one task to be
* moved
*/
- if (env->imbalance < busiest->load_per_task)
+ if (env->imbalance < busiest->load_per_task) {
+ /*
+ * The busiest group is overloaded so it could use help
+ * from the other groups. If the local group has idle CPUs
+		 * and it is not overloaded and has no imbalance within
+ * the group, allow the load balance by bumping the
+ * imbalance.
+ */
+ if (busiest->group_type == group_overloaded &&
+ local->group_type <= group_misfit_task &&
+ env->idle != CPU_NOT_IDLE) {
+ env->imbalance = busiest->load_per_task;
+ return;
+ }
+
return fix_small_imbalance(env, sds);
+ }
}
/******* find_busiest_group() helpers end here *********************/
@@ -9676,7 +9706,6 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
if (energy_aware() && !env->dst_rq->rd->overutilized) {
int cpu_local, cpu_busiest;
- long util_cum;
unsigned long energy_local, energy_busiest;
if (env->idle != CPU_NEWLY_IDLE)
@@ -9696,10 +9725,6 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
} else if (energy_local == energy_busiest) {
if (cpu_rq(cpu_busiest)->nr_running < 2)
goto out_balanced;
-
- util_cum = cpu_util_cum(cpu_busiest, 0);
- if (util_cum < cpu_util_cum(cpu_local, 0))
- goto out_balanced;
}
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index bb9cf4a9fd04..5049e3deea2b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -85,6 +85,7 @@ struct sched_cluster {
int notifier_sent;
bool wake_up_idle;
u64 aggr_grp_load;
+ u64 coloc_boost_load;
};
extern unsigned int sched_disable_window_stats;
@@ -2693,6 +2694,32 @@ static inline unsigned int power_cost(int cpu, bool max)
extern void walt_sched_energy_populated_callback(void);
extern void walt_update_min_max_capacity(void);
+static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
+{
+ enum sched_boost_policy boost_on_big = task_sched_boost(p) ?
+ sched_boost_policy() : SCHED_BOOST_NONE;
+
+ if (boost_on_big) {
+ /*
+ * Filter out tasks less than min task util threshold
+ * under conservative boost.
+ */
+ if (sysctl_sched_boost == CONSERVATIVE_BOOST &&
+ task_util(p) <=
+ sysctl_sched_min_task_util_for_boost_colocation)
+ boost_on_big = SCHED_BOOST_NONE;
+ }
+
+ return boost_on_big;
+}
+
+extern void walt_map_freq_to_load(void);
+
+static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
+{
+ return is_min_capacity_cpu(cluster_first_cpu(cluster));
+}
+
#else /* CONFIG_SCHED_WALT */
struct walt_sched_stats;
@@ -2703,7 +2730,10 @@ static inline bool task_sched_boost(struct task_struct *p)
{
return true;
}
-
+static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
+{
+ return SCHED_BOOST_NONE;
+}
static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
static inline int sched_boost(void)
@@ -2827,6 +2857,7 @@ static inline unsigned int power_cost(int cpu, bool max)
static inline void walt_sched_energy_populated_callback(void) { }
static inline void walt_update_min_max_capacity(void) { }
+static inline void walt_map_freq_to_load(void) { }
#endif /* CONFIG_SCHED_WALT */
static inline bool energy_aware(void)
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 43b84c9e34b4..5639d7b0873d 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -522,6 +522,7 @@ u64 freq_policy_load(struct rq *rq)
struct sched_cluster *cluster = rq->cluster;
u64 aggr_grp_load = cluster->aggr_grp_load;
u64 load, tt_load = 0;
+ u64 coloc_boost_load = cluster->coloc_boost_load;
if (rq->ed_task != NULL) {
load = sched_ravg_window;
@@ -533,6 +534,9 @@ u64 freq_policy_load(struct rq *rq)
else
load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
+ if (coloc_boost_load)
+ load = max_t(u64, load, coloc_boost_load);
+
tt_load = top_task_load(rq);
switch (reporting_policy) {
case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
@@ -549,7 +553,9 @@ u64 freq_policy_load(struct rq *rq)
done:
trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, freq_aggr_thresh,
- load, reporting_policy, walt_rotation_enabled);
+ load, reporting_policy, walt_rotation_enabled,
+ sysctl_sched_little_cluster_coloc_fmin_khz,
+ coloc_boost_load);
return load;
}
@@ -2384,6 +2390,7 @@ struct sched_cluster init_cluster = {
.notifier_sent = 0,
.wake_up_idle = 0,
.aggr_grp_load = 0,
+ .coloc_boost_load = 0,
};
void init_clusters(void)
@@ -2626,7 +2633,6 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
{
struct task_struct *p;
u64 combined_demand = 0;
- bool boost_on_big = sched_boost_policy() == SCHED_BOOST_ON_BIG;
bool group_boost = false;
u64 wallclock;
@@ -2650,7 +2656,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp)
return;
list_for_each_entry(p, &grp->tasks, grp_list) {
- if (boost_on_big && task_sched_boost(p)) {
+ if (task_boost_policy(p) == SCHED_BOOST_ON_BIG) {
group_boost = true;
break;
}
@@ -3160,6 +3166,70 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
}
+unsigned int sysctl_sched_little_cluster_coloc_fmin_khz;
+static u64 coloc_boost_load;
+
+void walt_map_freq_to_load(void)
+{
+ struct sched_cluster *cluster;
+
+ for_each_sched_cluster(cluster) {
+ if (is_min_capacity_cluster(cluster)) {
+ int fcpu = cluster_first_cpu(cluster);
+
+ coloc_boost_load = div64_u64(
+ ((u64)sched_ravg_window *
+ arch_scale_cpu_capacity(NULL, fcpu) *
+ sysctl_sched_little_cluster_coloc_fmin_khz),
+ (u64)1024 * cpu_max_possible_freq(fcpu));
+ coloc_boost_load = div64_u64(coloc_boost_load << 2, 5);
+ break;
+ }
+ }
+}
+
+static void walt_update_coloc_boost_load(void)
+{
+ struct related_thread_group *grp;
+ struct sched_cluster *cluster;
+
+ if (!sysctl_sched_little_cluster_coloc_fmin_khz ||
+ sysctl_sched_boost == CONSERVATIVE_BOOST)
+ return;
+
+ grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+ if (!grp || !grp->preferred_cluster ||
+ is_min_capacity_cluster(grp->preferred_cluster))
+ return;
+
+ for_each_sched_cluster(cluster) {
+ if (is_min_capacity_cluster(cluster)) {
+ cluster->coloc_boost_load = coloc_boost_load;
+ break;
+ }
+ }
+}
+
+int sched_little_cluster_coloc_fmin_khz_handler(struct ctl_table *table,
+ int write, void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ static DEFINE_MUTEX(mutex);
+
+ mutex_lock(&mutex);
+
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret || !write)
+ goto done;
+
+ walt_map_freq_to_load();
+
+done:
+ mutex_unlock(&mutex);
+ return ret;
+}
+
/*
* Runs in hard-irq context. This should ideally run just after the latest
* window roll-over.
@@ -3169,7 +3239,7 @@ void walt_irq_work(struct irq_work *irq_work)
struct sched_cluster *cluster;
struct rq *rq;
int cpu;
- u64 wc;
+ u64 wc, total_grp_load = 0;
int flag = SCHED_CPUFREQ_WALT;
bool is_migration = false;
int level = 0;
@@ -3205,10 +3275,15 @@ void walt_irq_work(struct irq_work *irq_work)
}
cluster->aggr_grp_load = aggr_grp_load;
+ total_grp_load = aggr_grp_load;
+ cluster->coloc_boost_load = 0;
raw_spin_unlock(&cluster->load_lock);
}
+ if (total_grp_load)
+ walt_update_coloc_boost_load();
+
for_each_sched_cluster(cluster) {
for_each_cpu(cpu, &cluster->cpus) {
int nflag = flag;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 75ae65639d38..659f3df652a8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -127,8 +127,9 @@ static int __maybe_unused three = 3;
static int __maybe_unused four = 4;
static unsigned long one_ul = 1;
static int one_hundred = 100;
-#ifdef CONFIG_PERF_EVENTS
-static int one_thousand = 1000;
+static int __maybe_unused one_thousand = 1000;
+#ifdef CONFIG_SCHED_WALT
+static int two_million = 2000000;
#endif
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
@@ -357,6 +358,24 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "sched_min_task_util_for_boost_colocation",
+ .data = &sysctl_sched_min_task_util_for_boost_colocation,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one_thousand,
+ },
+ {
+ .procname = "sched_little_cluster_coloc_fmin_khz",
+ .data = &sysctl_sched_little_cluster_coloc_fmin_khz,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_little_cluster_coloc_fmin_khz_handler,
+ .extra1 = &zero,
+ .extra2 = &two_million,
+ },
#endif
{
.procname = "sched_upmigrate",
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index d397432023be..af9a29ec90a6 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1657,6 +1657,22 @@ static inline void __run_timers(struct timer_base *base)
spin_lock_irq(&base->lock);
+ /*
+ * timer_base::must_forward_clk must be cleared before running
+ * timers so that any timer functions that call mod_timer() will
+ * not try to forward the base. Idle tracking / clock forwarding
+ * logic is only used with BASE_STD timers.
+ *
+ * The must_forward_clk flag is cleared unconditionally also for
+ * the deferrable base. The deferrable base is not affected by idle
+ * tracking and never forwarded, so clearing the flag is a NOOP.
+ *
+ * The fact that the deferrable base is never forwarded can cause
+ * large variations in granularity for deferrable timers, but they
+ * can be deferred for long periods due to idle anyway.
+ */
+ base->must_forward_clk = false;
+
while (time_after_eq(jiffies, base->clk)) {
levels = collect_expired_timers(base, heads);
@@ -1676,19 +1692,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
- /*
- * must_forward_clk must be cleared before running timers so that any
- * timer functions that call mod_timer will not try to forward the
- * base. idle trcking / clock forwarding logic is only used with
- * BASE_STD timers.
- *
- * The deferrable base does not do idle tracking at all, so we do
- * not forward it. This can result in very large variations in
- * granularity for deferrable timers, but they can be deferred for
- * long periods due to idle.
- */
- base->must_forward_clk = false;
-
__run_timers(base);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
diff --git a/mm/memory.c b/mm/memory.c
index 7a88700906d2..ccf1a6b9f7e1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -943,6 +943,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
spinlock_t *src_ptl, *dst_ptl;
int progress = 0;
int rss[NR_MM_COUNTERS];
+ unsigned long orig_addr = addr;
swp_entry_t entry = (swp_entry_t){0};
again:
@@ -981,6 +982,15 @@ again:
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
+
+ /*
+ * Prevent the page fault handler to copy the page while stale tlb entry
+ * are still not flushed.
+ */
+ if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT) &&
+ is_cow_mapping(vma->vm_flags))
+ flush_tlb_range(vma, orig_addr, end);
+
spin_unlock(src_ptl);
pte_unmap(orig_src_pte);
add_mm_rss_vec(dst_mm, rss);